diff --git a/README.md b/README.md
index 98921798..7e913907 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,38 @@
dp-dataset-api
==================
-A ONS API used to navigate datasets which are published.
+An ONS API used to navigate published datasets, editions and versions.
+
+### Installation
#### Database
-* Run ```brew install mongo```
-* Run ```brew services start mongodb```
-* Run ```./scripts/InitDatabase.sh```
+* Run `brew install mongo`
+* Run `brew services start mongodb`
+* Run `./scripts/InitDatabase.sh`
+
+### State changes
+
+Normal sequential order of states:
+
+1. `created` (only on *instance*)
+2. `submitted` (only on *instance*)
+3. `completed` (only on *instance*)
+4. `edition-confirmed` (only on *instance* - this will create an *edition* and *version*;
+ in other words, the *instance* will now be accessible via the `version` endpoints).
+ The dataset `next` sub-document and the *edition* are also updated here
+ (authorised users will see a different latest *version* link from unauthorised users)
+5. `associated` (only on *version*) - the dataset `next` sub-document and the *edition* are updated again
+6. `published` (only on *version*) - both the *edition* and *dataset* are updated; once published, a version must not be changed
+
+It is possible to **roll back** from `associated` to `edition-confirmed`
+where a PST user has attached the _version_ to the wrong collection: not only does
+the `collection_id` need to be updated with the new one (or removed altogether),
+but the state also needs to revert to `edition-confirmed`.
+
+Lastly, **skipping a state**: it is possible to jump from `edition-confirmed` to `published`
+as long as all the mandatory fields are present. There might also be a scenario whereby
+the state changes from `created` straight to `completed`, skipping `submitted`,
+due to race conditions; this is not expected to happen, as the path to
+`completed` is longer than the path to `submitted`.
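+
+A state change is applied by `PUT`ing the updated version document to the versions
+endpoint with the internal token header. Below is a minimal Go sketch mirroring the
+requests used in the API tests; the token and collection ID values are placeholders,
+and the API may expect further fields in the version document:
+
+```go
+package main
+
+import (
+	"bytes"
+	"net/http"
+)
+
+func main() {
+	// Associate version 1 of the 2017 edition of dataset 123 with a collection.
+	body := bytes.NewBufferString(`{"state": "associated", "collection_id": "<collection-id>"}`)
+	req, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", body)
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Add("internal-token", "<SECRET_KEY>")
+	if _, err := http.DefaultClient.Do(req); err != nil {
+		panic(err)
+	}
+}
+```
+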
### Healthcheck
@@ -24,10 +51,12 @@ one of:
| MONGODB_DATABASE | datasets | The MongoDB dataset database
| MONGODB_COLLECTION | datasets | MongoDB collection
| SECRET_KEY | FD0108EA-825D-411C-9B1D-41EF7727F465 | A secret key used authentication
-| CODE_LIST_API_URL | http://localhost:22400 | The host name for the Dataset API
-| DATASET_API_URL | http://localhost:22000 | The host name for the CodeList API
+| CODE_LIST_API_URL | http://localhost:22400 | The host name for the CodeList API
+| DATASET_API_URL | http://localhost:22000 | The host name for the Dataset API
+| GRACEFUL_SHUTDOWN_TIMEOUT | 5s | The graceful shutdown timeout in seconds
| WEBSITE_URL | http://localhost:20000 | The host name for the website
-| GRACEFUL_SHUTDOWN_TIMEOUT | 5s | The graceful shutdown timeout
+| KAFKA_ADDR | "localhost:9092" | A list of Kafka broker addresses
+| GENERATE_DOWNLOADS_TOPIC | "filter-job-submitted" | The Kafka topic used to request generation of full dataset version downloads
| HEALTHCHECK_TIMEOUT | 2s | The timeout that the healthcheck allows for checked subsystems
### Contributing
diff --git a/api/api.go b/api/api.go
index 245ee6aa..8c94a1e7 100644
--- a/api/api.go
+++ b/api/api.go
@@ -14,10 +14,18 @@ import (
"github.com/gorilla/mux"
)
-//go:generate moq -out test/api.go -pkg apitest . API
-
var httpServer *server.Server
+// API provides an interface for the routes
+type API interface {
+ CreateDatasetAPI(string, *mux.Router, store.DataStore) *DatasetAPI
+}
+
+// DownloadsGenerator pre-generates full file downloads for the specified dataset/edition/version
+type DownloadsGenerator interface {
+ Generate(datasetID, instanceID, edition, version string) error
+}
+
// DatasetAPI manages importing filters against a dataset
type DatasetAPI struct {
dataStore store.DataStore
@@ -26,13 +34,14 @@ type DatasetAPI struct {
privateAuth *auth.Authenticator
router *mux.Router
urlBuilder *url.Builder
+ downloadGenerator DownloadsGenerator
healthCheckTimeout time.Duration
}
// CreateDatasetAPI manages all the routes configured to API
-func CreateDatasetAPI(host, bindAddr, secretKey string, dataStore store.DataStore, urlBuilder *url.Builder, errorChan chan error, healthCheckTimeout time.Duration) {
+func CreateDatasetAPI(host, bindAddr, secretKey string, dataStore store.DataStore, urlBuilder *url.Builder, errorChan chan error, downloadsGenerator DownloadsGenerator, healthCheckTimeout time.Duration) {
router := mux.NewRouter()
- routes(host, secretKey, router, dataStore, urlBuilder, healthCheckTimeout)
+ routes(host, secretKey, router, dataStore, urlBuilder, downloadsGenerator, healthCheckTimeout)
httpServer = server.New(bindAddr, router)
// Disable this here to allow main to manage graceful shutdown of the entire app.
@@ -47,7 +56,7 @@ func CreateDatasetAPI(host, bindAddr, secretKey string, dataStore store.DataStor
}()
}
-func routes(host, secretKey string, router *mux.Router, dataStore store.DataStore, urlBuilder *url.Builder, healthCheckTimeout time.Duration) *DatasetAPI {
+func routes(host, secretKey string, router *mux.Router, dataStore store.DataStore, urlBuilder *url.Builder, downloadGenerator DownloadsGenerator, healthCheckTimeout time.Duration) *DatasetAPI {
api := DatasetAPI{
privateAuth: &auth.Authenticator{SecretKey: secretKey, HeaderName: "internal-token"},
dataStore: dataStore,
@@ -55,6 +64,7 @@ func routes(host, secretKey string, router *mux.Router, dataStore store.DataStor
internalToken: secretKey,
router: router,
urlBuilder: urlBuilder,
+ downloadGenerator: downloadGenerator,
healthCheckTimeout: healthCheckTimeout,
}
@@ -80,6 +90,7 @@ func routes(host, secretKey string, router *mux.Router, dataStore store.DataStor
api.router.HandleFunc("/instances/{id}", api.privateAuth.Check(instance.Update)).Methods("PUT")
api.router.HandleFunc("/instances/{id}/events", api.privateAuth.Check(instance.AddEvent)).Methods("POST")
api.router.HandleFunc("/instances/{id}/inserted_observations/{inserted_observations}", api.privateAuth.Check(instance.UpdateObservations)).Methods("PUT")
+ api.router.HandleFunc("/instances/{id}/import_tasks", api.privateAuth.Check(instance.UpdateImportTask)).Methods("PUT")
dimension := dimension.Store{Storer: api.dataStore.Backend}
api.router.HandleFunc("/instances/{id}/dimensions", dimension.GetNodes).Methods("GET")
diff --git a/api/dataset.go b/api/dataset.go
index 02326aa1..e7891582 100644
--- a/api/dataset.go
+++ b/api/dataset.go
@@ -6,13 +6,12 @@ import (
"net/http"
"time"
- "gopkg.in/mgo.v2/bson"
-
- "github.com/ONSdigital/dp-dataset-api/models"
-
errs "github.com/ONSdigital/dp-dataset-api/apierrors"
+ "github.com/ONSdigital/dp-dataset-api/models"
"github.com/ONSdigital/go-ns/log"
"github.com/gorilla/mux"
+ "github.com/pkg/errors"
+ "gopkg.in/mgo.v2/bson"
)
const (
@@ -462,12 +461,26 @@ func (api *DatasetAPI) putVersion(w http.ResponseWriter, r *http.Request) {
}
}
- if versionDoc.State == models.AssociatedState {
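+	// Only update the parent dataset and trigger download generation on the
+	// first transition into the associated state.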
+ if versionDoc.State == models.AssociatedState && currentVersion.State != models.AssociatedState {
if err := api.dataStore.Backend.UpdateDatasetWithAssociation(datasetID, versionDoc.State, versionDoc); err != nil {
log.ErrorC("failed to update dataset document after a version of a dataset has been associated with a collection", err, log.Data{"dataset_id": datasetID, "edition": edition, "version": version})
handleErrorType(versionDocType, err, w)
return
}
+
+ log.Info("generating full dataset version downloads", log.Data{"dataset_id": datasetID, "edition": edition, "version": version})
+
+ if err := api.downloadGenerator.Generate(datasetID, versionDoc.ID, edition, version); err != nil {
+ err = errors.Wrap(err, "error while attempting to generate full dataset version downloads")
+ log.Error(err, log.Data{
+ "dataset_id": datasetID,
+ "instance_id": versionDoc.ID,
+ "edition": edition,
+ "version": version,
+ })
+ // TODO - TECH DEBT - need to add an error event for this.
+ handleErrorType(versionDocType, err, w)
+ }
}
setJSONContentType(w)
@@ -566,6 +579,22 @@ func createNewVersionDoc(currentVersion *models.Version, version *models.Version
}
}
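+	// Carry over any existing download links: if the incoming version omits
+	// downloads entirely, or omits a specific format, fall back to the values
+	// already held on the current version.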
+ if version.Downloads == nil {
+ version.Downloads = currentVersion.Downloads
+ } else {
+ if version.Downloads.XLS == nil {
+ if currentVersion.Downloads != nil && currentVersion.Downloads.XLS != nil {
+ version.Downloads.XLS = currentVersion.Downloads.XLS
+ }
+ }
+
+ if version.Downloads.CSV == nil {
+ if currentVersion.Downloads != nil && currentVersion.Downloads.CSV != nil {
+ version.Downloads.CSV = currentVersion.Downloads.CSV
+ }
+ }
+ }
+
return version
}
diff --git a/api/dataset_test.go b/api/dataset_test.go
index 2b0de070..beee92e3 100644
--- a/api/dataset_test.go
+++ b/api/dataset_test.go
@@ -14,11 +14,15 @@ import (
"gopkg.in/mgo.v2/bson"
errs "github.com/ONSdigital/dp-dataset-api/apierrors"
+ "github.com/ONSdigital/dp-dataset-api/mocks"
"github.com/ONSdigital/dp-dataset-api/models"
"github.com/ONSdigital/dp-dataset-api/store"
"github.com/ONSdigital/dp-dataset-api/store/datastoretest"
+ "github.com/ONSdigital/go-ns/log"
+
"github.com/ONSdigital/dp-dataset-api/url"
"github.com/gorilla/mux"
+
. "github.com/smartystreets/goconvey/convey"
)
@@ -43,8 +47,8 @@ var (
)
// GetAPIWithMockedDatastore also used in other tests, so exported
-func GetAPIWithMockedDatastore(mockedDataStore store.Storer) *DatasetAPI {
- return routes(host, secretKey, mux.NewRouter(), store.DataStore{Backend: mockedDataStore}, urlBuilder, healthTimeout)
+func GetAPIWithMockedDatastore(mockedDataStore store.Storer, mockedGeneratedDownloads DownloadsGenerator) *DatasetAPI {
+ return routes(host, secretKey, mux.NewRouter(), store.DataStore{Backend: mockedDataStore}, urlBuilder, mockedGeneratedDownloads, healthTimeout)
}
func TestGetDatasetsReturnsOK(t *testing.T) {
@@ -58,7 +62,7 @@ func TestGetDatasetsReturnsOK(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.GetDatasetsCalls()), ShouldEqual, 1)
@@ -76,7 +80,7 @@ func TestGetDatasetsReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(len(mockedDataStore.GetDatasetsCalls()), ShouldEqual, 1)
@@ -94,7 +98,7 @@ func TestGetDatasetReturnsOK(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -110,7 +114,7 @@ func TestGetDatasetReturnsOK(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -128,7 +132,7 @@ func TestGetDatasetReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -143,7 +147,7 @@ func TestGetDatasetReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -158,7 +162,7 @@ func TestGetDatasetReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -166,6 +170,7 @@ func TestGetDatasetReturnsError(t *testing.T) {
}
func TestGetEditionsReturnsOK(t *testing.T) {
+
t.Parallel()
Convey("", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions", nil)
@@ -179,7 +184,7 @@ func TestGetEditionsReturnsOK(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -198,7 +203,7 @@ func TestGetEditionsReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -215,7 +220,7 @@ func TestGetEditionsReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -235,7 +240,7 @@ func TestGetEditionsReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -254,7 +259,7 @@ func TestGetEditionsReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -276,7 +281,7 @@ func TestGetEditionReturnsOK(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -295,7 +300,7 @@ func TestGetEditionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -312,7 +317,7 @@ func TestGetEditionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -332,7 +337,7 @@ func TestGetEditionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -351,7 +356,7 @@ func TestGetEditionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -376,7 +381,7 @@ func TestGetVersionsReturnsOK(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -396,7 +401,7 @@ func TestGetVersionsReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -413,7 +418,7 @@ func TestGetVersionsReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -433,7 +438,7 @@ func TestGetVersionsReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -457,7 +462,7 @@ func TestGetVersionsReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -480,7 +485,7 @@ func TestGetVersionsReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -514,7 +519,7 @@ func TestGetVersionReturnsOK(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -534,7 +539,7 @@ func TestGetVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -550,7 +555,7 @@ func TestGetVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -571,7 +576,7 @@ func TestGetVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -595,7 +600,7 @@ func TestGetVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -618,7 +623,7 @@ func TestGetVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -645,7 +650,7 @@ func TestPostDatasetsReturnsCreated(t *testing.T) {
}
mockedDataStore.UpsertDataset("123", &models.DatasetUpdate{})
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusCreated)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -670,7 +675,7 @@ func TestPostDatasetReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -692,7 +697,7 @@ func TestPostDatasetReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -713,7 +718,7 @@ func TestPostDatasetReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusUnauthorized)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 0)
@@ -739,7 +744,7 @@ func TestPostDatasetReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusForbidden)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -767,7 +772,7 @@ func TestPutDatasetReturnsSuccessfully(t *testing.T) {
}
mockedDataStore.UpdateDataset("123", dataset)
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.UpdateDatasetCalls()), ShouldEqual, 2)
@@ -783,13 +788,14 @@ func TestPutDatasetReturnsError(t *testing.T) {
r.Header.Add("internal-token", "coffee")
So(err, ShouldBeNil)
w := httptest.NewRecorder()
+
mockedDataStore := &storetest.StorerMock{
UpdateDatasetFunc: func(string, *models.Dataset) error {
return errBadRequest
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(len(mockedDataStore.UpsertVersionCalls()), ShouldEqual, 0)
@@ -802,6 +808,7 @@ func TestPutDatasetReturnsError(t *testing.T) {
r.Header.Add("internal-token", "coffee")
So(err, ShouldBeNil)
w := httptest.NewRecorder()
+
mockedDataStore := &storetest.StorerMock{
UpdateDatasetFunc: func(string, *models.Dataset) error {
return errInternal
@@ -813,7 +820,7 @@ func TestPutDatasetReturnsError(t *testing.T) {
}
mockedDataStore.UpdateDataset("123", dataset)
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(len(mockedDataStore.UpdateDatasetCalls()), ShouldEqual, 2)
@@ -826,6 +833,7 @@ func TestPutDatasetReturnsError(t *testing.T) {
r.Header.Add("internal-token", "coffee")
So(err, ShouldBeNil)
w := httptest.NewRecorder()
+
mockedDataStore := &storetest.StorerMock{
UpdateDatasetFunc: func(string, *models.Dataset) error {
return errs.ErrDatasetNotFound
@@ -837,7 +845,7 @@ func TestPutDatasetReturnsError(t *testing.T) {
}
mockedDataStore.UpdateDataset("123", dataset)
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(len(mockedDataStore.UpdateDatasetCalls()), ShouldEqual, 2)
@@ -849,13 +857,14 @@ func TestPutDatasetReturnsError(t *testing.T) {
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
+
mockedDataStore := &storetest.StorerMock{
UpdateDatasetFunc: func(string, *models.Dataset) error {
return nil
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusUnauthorized)
So(len(mockedDataStore.UpdateDatasetCalls()), ShouldEqual, 0)
@@ -865,12 +874,19 @@ func TestPutDatasetReturnsError(t *testing.T) {
func TestPutVersionReturnsSuccessfully(t *testing.T) {
t.Parallel()
Convey("When state is unchanged", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
var b string
b = versionPayload
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
r.Header.Add("internal-token", "coffee")
So(err, ShouldBeNil)
w := httptest.NewRecorder()
+
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(string, string) error {
return nil
@@ -908,8 +924,9 @@ func TestPutVersionReturnsSuccessfully(t *testing.T) {
mockedDataStore.GetVersion("123", "2017", "1", "")
mockedDataStore.UpdateVersion("a1b2c3", &models.Version{})
- api := GetAPIWithMockedDatastore(mockedDataStore)
+	api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
+
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
@@ -918,15 +935,23 @@ func TestPutVersionReturnsSuccessfully(t *testing.T) {
So(len(mockedDataStore.UpdateEditionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When state is set to associated", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
var b string
b = versionAssociatedPayload
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
r.Header.Add("internal-token", "coffee")
So(err, ShouldBeNil)
w := httptest.NewRecorder()
+
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(string, string) error {
return nil
@@ -950,25 +975,94 @@ func TestPutVersionReturnsSuccessfully(t *testing.T) {
mockedDataStore.UpdateVersion("a1b2c3", &models.Version{})
mockedDataStore.UpdateDatasetWithAssociation("123", models.AssociatedState, &models.Version{})
- api := GetAPIWithMockedDatastore(mockedDataStore)
+	api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
+
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 2)
- So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 2)
+ So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 1)
+ So(len(mockedDataStore.UpdateEditionCalls()), ShouldEqual, 0)
+ So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
+ })
+
+ Convey("When state is set to edition-confirmed", t, func() {
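+		// The generator mock signals on this buffered channel so the test can
+		// confirm Generate was actually invoked before making its assertions.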
+ downloadsGenerated := make(chan bool, 1)
+
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ downloadsGenerated <- true
+ return nil
+ },
+ }
+
+ var b string
+ b = versionAssociatedPayload
+ r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
+ r.Header.Add("internal-token", "coffee")
+ So(err, ShouldBeNil)
+ w := httptest.NewRecorder()
+
+ mockedDataStore := &storetest.StorerMock{
+ CheckDatasetExistsFunc: func(string, string) error {
+ return nil
+ },
+ CheckEditionExistsFunc: func(string, string, string) error {
+ return nil
+ },
+ GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
+ return &models.Version{
+ State: models.EditionConfirmedState,
+ }, nil
+ },
+ UpdateVersionFunc: func(string, *models.Version) error {
+ return nil
+ },
+ UpdateDatasetWithAssociationFunc: func(string, string, *models.Version) error {
+ return nil
+ },
+ }
+
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
+ api.router.ServeHTTP(w, r)
+
+ select {
+ case <-downloadsGenerated:
+ log.Info("download generated as expected", nil)
+ case <-time.After(time.Second * 10):
+ err := errors.New("failing test due to timeout")
+ log.Error(err, nil)
+ t.Fail()
+ }
+
+ So(w.Code, ShouldEqual, http.StatusOK)
+ So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
+ So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
+ So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
+ So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
+ So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateEditionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 1)
})
Convey("When state is set to published", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
var b string
b = versionPublishedPayload
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
r.Header.Add("internal-token", "coffee")
So(err, ShouldBeNil)
w := httptest.NewRecorder()
+
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(string, string) error {
return nil
@@ -1025,7 +1119,7 @@ func TestPutVersionReturnsSuccessfully(t *testing.T) {
mockedDataStore.GetDataset("123")
mockedDataStore.UpsertDataset("123", &models.DatasetUpdate{})
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
@@ -1036,12 +1130,190 @@ func TestPutVersionReturnsSuccessfully(t *testing.T) {
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 2)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 2)
So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
+ })
+}
+
+func TestPutVersionGenerateDownloadsError(t *testing.T) {
+ Convey("given download generator returns an error", t, func() {
+
+ mockedErr := errors.New("spectacular explosion")
+ var v models.Version
+ json.Unmarshal([]byte(versionAssociatedPayload), &v)
+ v.State = models.EditionConfirmedState
+
+ mockedDataStore := &storetest.StorerMock{
+ GetVersionFunc: func(datasetID string, editionID string, version string, state string) (*models.Version, error) {
+ return &v, nil
+ },
+ CheckDatasetExistsFunc: func(ID string, state string) error {
+ return nil
+ },
+ CheckEditionExistsFunc: func(ID string, editionID string, state string) error {
+ return nil
+ },
+ UpdateVersionFunc: func(ID string, version *models.Version) error {
+ return nil
+ },
+ UpdateDatasetWithAssociationFunc: func(ID string, state string, version *models.Version) error {
+ return nil
+ },
+ }
+
+ mockDownloadGenerator := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return mockedErr
+ },
+ }
+
+ Convey("when put version is called with a valid request", func() {
+ r, _ := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(versionAssociatedPayload))
+ r.Header.Add("internal-token", "coffee")
+ w := httptest.NewRecorder()
+
+ api := routes(host, secretKey, mux.NewRouter(), store.DataStore{Backend: mockedDataStore}, urlBuilder, mockDownloadGenerator, healthTimeout)
+ api.router.ServeHTTP(w, r)
+
+ Convey("then an internal server error response is returned", func() {
+ So(w.Code, ShouldEqual, http.StatusInternalServerError)
+ })
+
+ Convey("and the expected store calls are made with the expected parameters", func() {
+ genCalls := mockDownloadGenerator.GenerateCalls()
+
+ So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
+ So(mockedDataStore.CheckDatasetExistsCalls()[0].ID, ShouldEqual, "123")
+
+ So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
+ So(mockedDataStore.CheckEditionExistsCalls()[0].ID, ShouldEqual, "123")
+ So(mockedDataStore.CheckEditionExistsCalls()[0].EditionID, ShouldEqual, "2017")
+
+ So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
+ So(mockedDataStore.GetVersionCalls()[0].DatasetID, ShouldEqual, "123")
+ So(mockedDataStore.GetVersionCalls()[0].EditionID, ShouldEqual, "2017")
+ So(mockedDataStore.GetVersionCalls()[0].Version, ShouldEqual, "1")
+
+ So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
+
+ So(len(genCalls), ShouldEqual, 1)
+ So(genCalls[0].DatasetID, ShouldEqual, "123")
+ So(genCalls[0].Edition, ShouldEqual, "2017")
+ So(genCalls[0].Version, ShouldEqual, "1")
+ })
+ })
+ })
+}
+
+func TestPutEmptyVersion(t *testing.T) {
+ var v models.Version
+ json.Unmarshal([]byte(versionAssociatedPayload), &v)
+ v.State = models.AssociatedState
+ xlsDownload := &models.DownloadList{XLS: &models.DownloadObject{Size: "1", URL: "/hello"}}
+
+ Convey("given an existing version with empty downloads", t, func() {
+ mockedDataStore := &storetest.StorerMock{
+ GetVersionFunc: func(datasetID string, editionID string, version string, state string) (*models.Version, error) {
+ return &v, nil
+ },
+ CheckDatasetExistsFunc: func(ID string, state string) error {
+ return nil
+ },
+ CheckEditionExistsFunc: func(ID string, editionID string, state string) error {
+ return nil
+ },
+ UpdateVersionFunc: func(ID string, version *models.Version) error {
+ return nil
+ },
+ }
+
+ Convey("when put version is called with an associated version with empty downloads", func() {
+ r, _ := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(versionAssociatedPayload))
+ r.Header.Add("internal-token", "coffee")
+ w := httptest.NewRecorder()
+
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
+ api.router.ServeHTTP(w, r)
+
+ Convey("then a http status ok is returned", func() {
+ So(w.Code, ShouldEqual, http.StatusOK)
+ })
+
+ Convey("and the updated version is as expected", func() {
+ So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
+ So(mockedDataStore.UpdateVersionCalls()[0].Version.Downloads, ShouldBeNil)
+ })
+ })
+ })
+
+ Convey("given an existing version with a xls download already exists", t, func() {
+ mockedDataStore := &storetest.StorerMock{
+ GetVersionFunc: func(datasetID string, editionID string, version string, state string) (*models.Version, error) {
+ v.Downloads = xlsDownload
+ return &v, nil
+ },
+ CheckDatasetExistsFunc: func(ID string, state string) error {
+ return nil
+ },
+ CheckEditionExistsFunc: func(ID string, editionID string, state string) error {
+ return nil
+ },
+ UpdateVersionFunc: func(ID string, version *models.Version) error {
+ return nil
+ },
+ }
+
+ mockDownloadGenerator := &mocks.DownloadsGeneratorMock{}
+
+ Convey("when put version is called with an associated version with empty downloads", func() {
+ r, _ := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(versionAssociatedPayload))
+ r.Header.Add("internal-token", "coffee")
+ w := httptest.NewRecorder()
+
+			api := GetAPIWithMockedDatastore(mockedDataStore, mockDownloadGenerator)
+ api.router.ServeHTTP(w, r)
+
+ Convey("then a http status ok is returned", func() {
+ So(w.Code, ShouldEqual, http.StatusOK)
+ })
+
+ Convey("and any existing version downloads are not overwritten", func() {
+ So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
+ So(mockedDataStore.UpdateVersionCalls()[0].Version.Downloads, ShouldResemble, xlsDownload)
+ })
+
+ Convey("and the expected external calls are made with the correct parameters", func() {
+ So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
+ So(mockedDataStore.CheckDatasetExistsCalls()[0].ID, ShouldEqual, "123")
+ So(mockedDataStore.CheckDatasetExistsCalls()[0].State, ShouldEqual, "")
+
+ So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
+ So(mockedDataStore.CheckEditionExistsCalls()[0].ID, ShouldEqual, "123")
+ So(mockedDataStore.CheckEditionExistsCalls()[0].EditionID, ShouldEqual, "2017")
+ So(mockedDataStore.CheckEditionExistsCalls()[0].State, ShouldEqual, "")
+
+ So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
+ So(mockedDataStore.GetVersionCalls()[0].DatasetID, ShouldEqual, "123")
+ So(mockedDataStore.GetVersionCalls()[0].EditionID, ShouldEqual, "2017")
+ So(mockedDataStore.GetVersionCalls()[0].Version, ShouldEqual, "1")
+ So(mockedDataStore.GetVersionCalls()[0].State, ShouldEqual, "")
+
+ So(len(mockedDataStore.UpdateEditionCalls()), ShouldEqual, 0)
+ So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 0)
+ So(len(mockDownloadGenerator.GenerateCalls()), ShouldEqual, 0)
+ })
+ })
})
}
func TestPutVersionReturnsError(t *testing.T) {
t.Parallel()
Convey("When the request contain malformed json a bad request status is returned", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
var b string
b = "{"
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
@@ -1054,14 +1326,21 @@ func TestPutVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
So(w.Body.String(), ShouldEqual, "Failed to parse json body\n")
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When the api cannot connect to datastore return an internal server error", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
var b string
b = versionPayload
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
@@ -1077,15 +1356,22 @@ func TestPutVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldEqual, "internal error\n")
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When the dataset document cannot be found for version return status bad request", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+		GenerateFunc: func(datasetID string, instanceID string, edition string, version string) error {
+ return nil
+ },
+ }
+
var b string
b = versionPayload
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
@@ -1101,15 +1387,22 @@ func TestPutVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(w.Body.String(), ShouldEqual, "Dataset not found\n")
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When the edition document cannot be found for version return status bad request", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
var b string
b = versionPayload
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
@@ -1125,16 +1418,23 @@ func TestPutVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(w.Body.String(), ShouldEqual, "Edition not found\n")
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When the version document cannot be found return status not found", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
var b string
b = versionPayload
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
@@ -1156,7 +1456,7 @@ func TestPutVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldEqual, "Version not found\n")
@@ -1164,9 +1464,16 @@ func TestPutVersionReturnsError(t *testing.T) {
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When the request does not contain a valid internal token return status unauthorised", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
var b string
b = versionPayload
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
@@ -1178,14 +1485,61 @@ func TestPutVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusUnauthorized)
So(w.Body.String(), ShouldEqual, "No authentication header provided\n")
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
+ })
+
+ Convey("Given the version doc is 'published', when we try to set state to 'completed', then we see a status of forbidden", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
+ var b string
+ b = versionPayload
+ r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
+ r.Header.Add("internal-token", "coffee")
+ So(err, ShouldBeNil)
+ w := httptest.NewRecorder()
+ mockedDataStore := &storetest.StorerMock{
+ CheckDatasetExistsFunc: func(string, string) error {
+ return nil
+ },
+ CheckEditionExistsFunc: func(string, string, string) error {
+ return nil
+ },
+ GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
+ return &models.Version{
+ State: models.PublishedState,
+ }, nil
+ },
+ UpdateVersionFunc: func(string, *models.Version) error {
+ return nil
+ },
+ }
+
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
+ api.router.ServeHTTP(w, r)
+ So(w.Code, ShouldEqual, http.StatusForbidden)
+ So(w.Body.String(), ShouldEqual, "unable to update document, already published\n")
+ So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
+ So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
+ So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
+ So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 0)
})
Convey("When the version document has already been published return status forbidden", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
var b string
b = versionPayload
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
@@ -1209,7 +1563,7 @@ func TestPutVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusForbidden)
So(w.Body.String(), ShouldEqual, "unable to update document, already published\n")
@@ -1217,9 +1571,16 @@ func TestPutVersionReturnsError(t *testing.T) {
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When the request body is invalid return status bad request", t, func() {
+ generatorMock := &mocks.DownloadsGeneratorMock{
+ GenerateFunc: func(string, string, string, string) error {
+ return nil
+ },
+ }
+
var b string
b = `{"instance_id":"a1b2c3","edition":"2017","license":"ONS","release_date":"2017-04-04","state":"associated"}`
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
@@ -1241,7 +1602,7 @@ func TestPutVersionReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, generatorMock)
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(w.Body.String(), ShouldEqual, "Missing collection_id for association between version and a collection\n")
@@ -1249,6 +1610,7 @@ func TestPutVersionReturnsError(t *testing.T) {
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 0)
+ So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
}
@@ -1266,7 +1628,7 @@ func TestGetDimensionsReturnsOk(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
@@ -1285,7 +1647,7 @@ func TestGetDimensionsReturnsErrors(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldEqual, "internal error\n")
@@ -1303,7 +1665,7 @@ func TestGetDimensionsReturnsErrors(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(w.Body.String(), ShouldEqual, "Version not found\n")
@@ -1324,7 +1686,7 @@ func TestGetDimensionsReturnsErrors(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldEqual, "Dimensions not found\n")
@@ -1345,7 +1707,7 @@ func TestGetDimensionOptionsReturnsOk(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
})
@@ -1362,7 +1724,7 @@ func TestGetDimensionOptionsReturnsErrors(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
})
@@ -1376,7 +1738,7 @@ func TestGetDimensionOptionsReturnsErrors(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
})
@@ -1404,7 +1766,7 @@ func TestGetMetadataReturnsOk(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -1458,7 +1820,7 @@ func TestGetMetadataReturnsOk(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
@@ -1503,7 +1865,7 @@ func TestGetMetadataReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldEqual, "internal error\n")
@@ -1523,7 +1885,7 @@ func TestGetMetadataReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(w.Body.String(), ShouldEqual, "Dataset not found\n")
@@ -1548,7 +1910,7 @@ func TestGetMetadataReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(w.Body.String(), ShouldEqual, "Edition not found\n")
@@ -1577,7 +1939,7 @@ func TestGetMetadataReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldEqual, "Version not found\n")
diff --git a/api/healthcheck.go b/api/healthcheck.go
index acafd289..20a50d42 100644
--- a/api/healthcheck.go
+++ b/api/healthcheck.go
@@ -45,7 +45,8 @@ func (api *DatasetAPI) healthCheck(w http.ResponseWriter, r *http.Request) {
// test db access
wg.Add(1)
go func() {
- lastPing, err := api.dataStore.Backend.Ping(r.Context())
+ var lastPing time.Time
+ lastPing, err = api.dataStore.Backend.Ping(r.Context())
healthChan <- healthResult{Error: err, LastChecked: lastPing}
wg.Done()
if err != nil {
diff --git a/api/healthcheck_test.go b/api/healthcheck_test.go
index 9d79a205..84963149 100644
--- a/api/healthcheck_test.go
+++ b/api/healthcheck_test.go
@@ -8,8 +8,8 @@ import (
"testing"
"time"
- "github.com/ONSdigital/dp-dataset-api/store/datastoretest"
-
+ "github.com/ONSdigital/dp-dataset-api/mocks"
+ storetest "github.com/ONSdigital/dp-dataset-api/store/datastoretest"
. "github.com/smartystreets/goconvey/convey"
)
@@ -25,7 +25,7 @@ func TestHealthCheckReturnsOK(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
body := w.Body.String()
@@ -46,7 +46,7 @@ func TestHealthCheckReturnsError(t *testing.T) {
},
}
- api := GetAPIWithMockedDatastore(mockedDataStore)
+ api := GetAPIWithMockedDatastore(mockedDataStore, &mocks.DownloadsGeneratorMock{})
api.router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
body := w.Body.String()
diff --git a/config/config.go b/config/config.go
index 3c6c17db..44db8155 100644
--- a/config/config.go
+++ b/config/config.go
@@ -3,13 +3,14 @@ package config
import (
"time"
- "github.com/ONSdigital/go-ns/log"
"github.com/kelseyhightower/envconfig"
)
// Configuration structure which hold information for configuring the import API
type Configuration struct {
BindAddr string `envconfig:"BIND_ADDR"`
+ KafkaAddr []string `envconfig:"KAFKA_ADDR"`
+ GenerateDownloadsTopic string `envconfig:"GENERATE_DOWNLOADS_TOPIC"`
CodeListAPIURL string `envconfig:"CODE_LIST_API_URL"`
DatasetAPIURL string `envconfig:"DATASET_API_URL"`
WebsiteURL string `envconfig:"WEBSITE_URL"`
@@ -36,6 +37,8 @@ func Get() (*Configuration, error) {
cfg = &Configuration{
BindAddr: ":22000",
+ KafkaAddr: []string{"localhost:9092"},
+ GenerateDownloadsTopic: "filter-job-submitted",
CodeListAPIURL: "http://localhost:22400",
DatasetAPIURL: "http://localhost:22000",
WebsiteURL: "http://localhost:20000",
@@ -49,9 +52,5 @@ func Get() (*Configuration, error) {
},
}
- sanitized := *cfg
- sanitized.SecretKey = ""
- log.Info("config on startup", log.Data{"config": sanitized})
-
return cfg, envconfig.Process("", cfg)
}
diff --git a/config/config_test.go b/config/config_test.go
index ae15763b..f25f97ef 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -19,6 +19,8 @@ func TestSpec(t *testing.T) {
Convey("The values should be set to the expected defaults", func() {
So(cfg.BindAddr, ShouldEqual, ":22000")
+ So(cfg.KafkaAddr, ShouldResemble, []string{"localhost:9092"})
+ So(cfg.GenerateDownloadsTopic, ShouldEqual, "filter-job-submitted")
So(cfg.DatasetAPIURL, ShouldEqual, "http://localhost:22000")
So(cfg.CodeListAPIURL, ShouldEqual, "http://localhost:22400")
So(cfg.WebsiteURL, ShouldEqual, "http://localhost:20000")
diff --git a/download/download_generator.go b/download/download_generator.go
new file mode 100644
index 00000000..760ab1d2
--- /dev/null
+++ b/download/download_generator.go
@@ -0,0 +1,108 @@
+package download
+
+import (
+ "fmt"
+
+ "github.com/ONSdigital/go-ns/log"
+ "github.com/pkg/errors"
+)
+
+//go:generate moq -out ../mocks/generate_downloads_mocks.go -pkg mocks . KafkaProducer GenerateDownloadsEvent
+
+var (
+ avroMarshalErr = "error while attempting to marshal generateDownloadsEvent to avro bytes"
+
+ datasetIDEmptyErr = newGeneratorError(nil, "failed to generate full dataset download as dataset ID was empty")
+ instanceIDEmptyErr = newGeneratorError(nil, "failed to generate full dataset download as instance ID was empty")
+ editionEmptyErr = newGeneratorError(nil, "failed to generate full dataset download as edition was empty")
+ versionEmptyErr = newGeneratorError(nil, "failed to generate full dataset download as version was empty")
+)
+
+// KafkaProducer sends an outbound kafka message
+type KafkaProducer interface {
+ Output() chan []byte
+}
+
+// GenerateDownloadsEvent marshals the event into avro format
+type GenerateDownloadsEvent interface {
+ Marshal(s interface{}) ([]byte, error)
+}
+
+type generateDownloads struct {
+ FilterID string `avro:"filter_output_id"`
+ InstanceID string `avro:"instance_id"`
+ DatasetID string `avro:"dataset_id"`
+ Edition string `avro:"edition"`
+ Version string `avro:"version"`
+}
+
+// Generator kicks off a full dataset version download task
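+//
+// A Generator would typically be constructed once at startup and passed to the
+// API as its DownloadsGenerator. Sketch only; producer, marshaller, errorChan
+// and timeout are placeholder names, and the real wiring lives outside this file:
+//
+//	gen := &Generator{Producer: producer, Marshaller: marshaller}
+//	api.CreateDatasetAPI(host, bindAddr, secretKey, dataStore, urlBuilder, errorChan, gen, timeout)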
+type Generator struct {
+ Producer KafkaProducer
+ Marshaller GenerateDownloadsEvent
+}
+
+// Generate the full download files for the specified dataset/edition/version
+func (gen *Generator) Generate(datasetID string, instanceID string, edition string, version string) error {
+ if datasetID == "" {
+ return datasetIDEmptyErr
+ }
+ if instanceID == "" {
+ return instanceIDEmptyErr
+ }
+ if edition == "" {
+ return editionEmptyErr
+ }
+ if version == "" {
+ return versionEmptyErr
+ }
+
+	// FilterID is set to an empty string as the avro schema expects there to be
+	// a filter ID, otherwise the struct won't be marshalled into an acceptable message
+ downloads := generateDownloads{
+ FilterID: "",
+ DatasetID: datasetID,
+ InstanceID: instanceID,
+ Edition: edition,
+ Version: version,
+ }
+
+ log.Info("send generate downloads event", log.Data{
+ "datasetID": datasetID,
+ "instanceID": instanceID,
+ "edition": edition,
+ "version": version,
+ })
+
+ avroBytes, err := gen.Marshaller.Marshal(downloads)
+ if err != nil {
+ return newGeneratorError(err, avroMarshalErr)
+ }
+
+ gen.Producer.Output() <- avroBytes
+
+ return nil
+}
+
+// GeneratorError is a wrapper for errors returned from the Generator
+type GeneratorError struct {
+ originalErr error
+ message string
+ args []interface{}
+}
+
+func newGeneratorError(err error, message string, args ...interface{}) GeneratorError {
+ return GeneratorError{
+ originalErr: err,
+ message: message,
+ args: args,
+ }
+}
+
+// Error returns details about the error
+func (genErr GeneratorError) Error() string {
+ if genErr.originalErr == nil {
+ return errors.Errorf(genErr.message, genErr.args...).Error()
+ }
+ return errors.Wrap(genErr.originalErr, fmt.Sprintf(genErr.message, genErr.args...)).Error()
+}
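The sketch below shows how the new Generator is intended to be wired up and invoked; it mirrors the wiring added to main.go further down, with an illustrative broker address, topic and dataset/instance identifiers:

```go
package main

import (
	"os"

	"github.com/ONSdigital/dp-dataset-api/download"
	"github.com/ONSdigital/dp-dataset-api/schema"
	"github.com/ONSdigital/go-ns/kafka"
	"github.com/ONSdigital/go-ns/log"
)

func main() {
	// Create the kafka producer for the generate-downloads topic.
	producer, err := kafka.NewProducer([]string{"localhost:9092"}, "filter-job-submitted", 0)
	if err != nil {
		log.Error(err, nil)
		os.Exit(1)
	}

	// The avro schema acts as the event marshaller.
	generator := &download.Generator{
		Producer:   producer,
		Marshaller: schema.GenerateDownloadsEvent,
	}

	// Kick off generation of the full downloads for a given dataset version.
	if err := generator.Generate("cpih01", "instance-id-123", "time-series", "1"); err != nil {
		log.Error(err, nil)
	}
}
```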
diff --git a/download/download_generator_test.go b/download/download_generator_test.go
new file mode 100644
index 00000000..38116f47
--- /dev/null
+++ b/download/download_generator_test.go
@@ -0,0 +1,197 @@
+package download
+
+import (
+ "testing"
+
+ "github.com/ONSdigital/dp-dataset-api/mocks"
+ "github.com/pkg/errors"
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func TestGenerator_GenerateFullDatasetDownloadsValidationErrors(t *testing.T) {
+ producerMock := &mocks.KafkaProducerMock{
+ OutputFunc: func() chan []byte {
+ return nil
+ },
+ }
+
+ marhsallerMock := &mocks.GenerateDownloadsEventMock{
+ MarshalFunc: func(s interface{}) ([]byte, error) {
+ return nil, nil
+ },
+ }
+
+ gen := Generator{
+ Producer: producerMock,
+ Marshaller: marhsallerMock,
+ }
+
+ Convey("Given an invalid datasetID", t, func() {
+
+ Convey("When the generator is called", func() {
+ err := gen.Generate("", "", "", "")
+
+ Convey("Then the expected error is returned", func() {
+ So(err, ShouldResemble, datasetIDEmptyErr)
+ })
+
+ Convey("And marshaller is never called", func() {
+ So(len(marhsallerMock.MarshalCalls()), ShouldEqual, 0)
+ })
+
+ Convey("And producer is never called", func() {
+ So(len(producerMock.OutputCalls()), ShouldEqual, 0)
+ })
+ })
+ })
+
+ Convey("Given an empty instanceID", t, func() {
+ Convey("When the generator is called", func() {
+ err := gen.Generate("1234567890", "", "", "")
+
+ Convey("Then the expected error is returned", func() {
+ So(err, ShouldResemble, instanceIDEmptyErr)
+ })
+
+ Convey("And marshaller is never called", func() {
+ So(len(marhsallerMock.MarshalCalls()), ShouldEqual, 0)
+ })
+
+ Convey("And producer is never called", func() {
+ So(len(producerMock.OutputCalls()), ShouldEqual, 0)
+ })
+ })
+ })
+
+ Convey("Given an empty edition", t, func() {
+ Convey("When the generator is called", func() {
+ err := gen.Generate("1234567890", "1234567890", "", "")
+
+ Convey("Then the expected error is returned", func() {
+ So(err, ShouldResemble, editionEmptyErr)
+ })
+
+ Convey("And marshaller is never called", func() {
+ So(len(marhsallerMock.MarshalCalls()), ShouldEqual, 0)
+ })
+
+ Convey("And producer is never called", func() {
+ So(len(producerMock.OutputCalls()), ShouldEqual, 0)
+ })
+ })
+ })
+
+ Convey("Given an empty version", t, func() {
+ Convey("When the generator is called", func() {
+ err := gen.Generate("1234567890", "1234567890", "time-series", "")
+
+ Convey("Then the expected error is returned", func() {
+ So(err, ShouldResemble, versionEmptyErr)
+ })
+
+ Convey("And marshaller is never called", func() {
+ So(len(marhsallerMock.MarshalCalls()), ShouldEqual, 0)
+ })
+
+ Convey("And producer is never called", func() {
+ So(len(producerMock.OutputCalls()), ShouldEqual, 0)
+ })
+ })
+ })
+}
+
+func TestGenerator_GenerateMarshalError(t *testing.T) {
+ Convey("when marshal returns an error", t, func() {
+ datasetID := "111"
+ instanceID := "222"
+ edition := "333"
+ version := "4"
+ mockErr := errors.New("let's get schwifty")
+
+ producerMock := &mocks.KafkaProducerMock{
+ OutputFunc: func() chan []byte {
+ return nil
+ },
+ }
+
+ marhsallerMock := &mocks.GenerateDownloadsEventMock{
+ MarshalFunc: func(s interface{}) ([]byte, error) {
+ return nil, mockErr
+ },
+ }
+
+ gen := Generator{
+ Producer: producerMock,
+ Marshaller: marhsallerMock,
+ }
+
+ err := gen.Generate(datasetID, instanceID, edition, version)
+
+		Convey("then the expected error is returned", func() {
+ So(err, ShouldResemble, newGeneratorError(mockErr, avroMarshalErr))
+ })
+
+ Convey("and marshal is called one time", func() {
+ So(len(marhsallerMock.MarshalCalls()), ShouldEqual, 1)
+ })
+
+ Convey("and kafka producer is never called", func() {
+ So(len(producerMock.OutputCalls()), ShouldEqual, 0)
+ })
+ })
+}
+
+func TestGenerator_Generate(t *testing.T) {
+ Convey("given valid input", t, func() {
+ datasetID := "111"
+ instanceID := "222"
+ edition := "333"
+ version := "4"
+
+ downloads := generateDownloads{
+ FilterID: "",
+ DatasetID: datasetID,
+ InstanceID: instanceID,
+ Edition: edition,
+ Version: version,
+ }
+
+ output := make(chan []byte, 1)
+ avroBytes := []byte("hello world")
+
+ producerMock := &mocks.KafkaProducerMock{
+ OutputFunc: func() chan []byte {
+ return output
+ },
+ }
+
+ marhsallerMock := &mocks.GenerateDownloadsEventMock{
+ MarshalFunc: func(s interface{}) ([]byte, error) {
+ return avroBytes, nil
+ },
+ }
+
+ gen := Generator{
+ Producer: producerMock,
+ Marshaller: marhsallerMock,
+ }
+
+ Convey("when generate is called no error is returned", func() {
+ err := gen.Generate(datasetID, instanceID, edition, version)
+ So(err, ShouldBeNil)
+
+ Convey("then marshal is called with the expected parameters", func() {
+ So(len(marhsallerMock.MarshalCalls()), ShouldEqual, 1)
+ So(marhsallerMock.MarshalCalls()[0].S, ShouldResemble, downloads)
+ })
+
+ Convey("and producer output is called one time with the expected parameters", func() {
+ So(len(producerMock.OutputCalls()), ShouldEqual, 1)
+
+ producerOut := <-output
+ So(producerOut, ShouldResemble, avroBytes)
+ })
+
+ })
+ })
+}
diff --git a/instance/instance.go b/instance/instance.go
index 4bb95915..fb4e27c5 100644
--- a/instance/instance.go
+++ b/instance/instance.go
@@ -15,7 +15,7 @@ import (
"github.com/ONSdigital/dp-dataset-api/store"
"github.com/ONSdigital/go-ns/log"
"github.com/gorilla/mux"
- uuid "github.com/satori/go.uuid"
+ "github.com/satori/go.uuid"
)
//Store provides a backend for instances
@@ -133,16 +133,23 @@ func (s *Store) Update(w http.ResponseWriter, r *http.Request) {
// Combine existing links and spatial link
instance.Links = updateLinks(instance, currentInstance)
+ logData := log.Data{"instance_id": id, "current_state": currentInstance.State, "requested_state": instance.State}
switch instance.State {
+ case models.CompletedState:
+ if err = validateInstanceUpdate(models.SubmittedState, currentInstance, instance); err != nil {
+ log.Error(err, logData)
+ http.Error(w, err.Error(), http.StatusForbidden)
+ return
+ }
case models.EditionConfirmedState:
if err = validateInstanceUpdate(models.CompletedState, currentInstance, instance); err != nil {
- log.Error(err, log.Data{"instance_id": id, "current_state": currentInstance.State})
+ log.Error(err, logData)
http.Error(w, err.Error(), http.StatusForbidden)
return
}
case models.AssociatedState:
if err = validateInstanceUpdate(models.EditionConfirmedState, currentInstance, instance); err != nil {
- log.Error(err, log.Data{"instance_id": id, "current_state": currentInstance.State})
+ log.Error(err, logData)
http.Error(w, err.Error(), http.StatusForbidden)
return
}
@@ -150,7 +157,7 @@ func (s *Store) Update(w http.ResponseWriter, r *http.Request) {
// TODO Update dataset.next state to associated and add collection id
case models.PublishedState:
if err = validateInstanceUpdate(models.AssociatedState, currentInstance, instance); err != nil {
- log.Error(err, log.Data{"instance_id": id, "current_state": currentInstance.State})
+ log.Error(err, logData)
http.Error(w, err.Error(), http.StatusForbidden)
return
}
@@ -349,6 +356,75 @@ func (s *Store) UpdateObservations(w http.ResponseWriter, r *http.Request) {
}
}
+func (s *Store) UpdateImportTask(w http.ResponseWriter, r *http.Request) {
+
+ vars := mux.Vars(r)
+ id := vars["id"]
+
+ defer r.Body.Close()
+
+ tasks, err := unmarshalImportTasks(r.Body)
+ if err != nil {
+ log.Error(err, nil)
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ validationErrs := make([]error, 0)
+
+ if tasks.ImportObservations != nil {
+ if tasks.ImportObservations.State != "" {
+ if tasks.ImportObservations.State != models.CompletedState {
+ validationErrs = append(validationErrs, fmt.Errorf("bad request - invalid task state value for import observations: %v", tasks.ImportObservations.State))
+ } else if err := s.UpdateImportObservationsTaskState(id, tasks.ImportObservations.State); err != nil {
+ log.Error(err, nil)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+
+ if tasks.BuildHierarchyTasks != nil {
+ for _, task := range tasks.BuildHierarchyTasks {
+ if task.State != "" {
+ if task.State != models.CompletedState {
+ validationErrs = append(validationErrs, fmt.Errorf("bad request - invalid task state value: %v", task.State))
+ } else if err := s.UpdateBuildHierarchyTaskState(id, task.DimensionName, task.State); err != nil {
+ log.Error(err, nil)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+ }
+
+ if len(validationErrs) > 0 {
+ for _, err := range validationErrs {
+ log.Error(err, nil)
+ }
+ // todo: add all validation errors to the response
+ http.Error(w, validationErrs[0].Error(), http.StatusBadRequest)
+ return
+ }
+
+}
+
+func unmarshalImportTasks(reader io.Reader) (*models.InstanceImportTasks, error) {
+
+ bytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return nil, errors.New("failed to read message body")
+ }
+
+ var tasks models.InstanceImportTasks
+ err = json.Unmarshal(bytes, &tasks)
+ if err != nil {
+ return nil, errors.New("failed to parse json body: " + err.Error())
+ }
+
+ return &tasks, nil
+}
+
func unmarshalInstance(reader io.Reader, post bool) (*models.Instance, error) {
bytes, err := ioutil.ReadAll(reader)
if err != nil {
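For reference, a request body accepted by the new `UpdateImportTask` handler could look like the sketch below (the dimension name is illustrative; only the `completed` state passes the validation above):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ONSdigital/dp-dataset-api/models"
)

func main() {
	// Example body for PUT /instances/{id}/import_tasks.
	body := `{
		"import_observations": {"state": "completed"},
		"build_hierarchies": [
			{"dimension_name": "geography", "state": "completed"}
		]
	}`

	var tasks models.InstanceImportTasks
	if err := json.Unmarshal([]byte(body), &tasks); err != nil {
		panic(err)
	}

	fmt.Println(tasks.ImportObservations.State)             // completed
	fmt.Println(tasks.BuildHierarchyTasks[0].DimensionName) // geography
}
```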
diff --git a/instance/instance_external_test.go b/instance/instance_external_test.go
index 051a1d6b..3ccbf40d 100644
--- a/instance/instance_external_test.go
+++ b/instance/instance_external_test.go
@@ -11,7 +11,7 @@ import (
errs "github.com/ONSdigital/dp-dataset-api/apierrors"
"github.com/ONSdigital/dp-dataset-api/instance"
"github.com/ONSdigital/dp-dataset-api/models"
- storetest "github.com/ONSdigital/dp-dataset-api/store/datastoretest"
+ "github.com/ONSdigital/dp-dataset-api/store/datastoretest"
"github.com/gorilla/mux"
. "github.com/smartystreets/goconvey/convey"
)
@@ -352,7 +352,42 @@ func TestUpdateInstanceFailure(t *testing.T) {
})
}
-func TestUpdateInstanceReturnsInternalError(t *testing.T) {
+func TestUpdatePublishedInstanceToCompletedReturnsForbidden(t *testing.T) {
+ t.Parallel()
+	Convey("Given a 'published' instance, when we update to 'completed' then we get a forbidden error", t, func() {
+ body := strings.NewReader(`{"state":"completed"}`)
+ r := createRequestWithToken("PUT", "http://localhost:21800/instances/1235", body)
+ w := httptest.NewRecorder()
+
+ currentInstanceTestData := &models.Instance{
+ Edition: "2017",
+ Links: &models.InstanceLinks{
+ Dataset: &models.IDLink{
+ ID: "4567",
+ },
+ },
+ State: models.PublishedState,
+ }
+
+ mockedDataStore := &storetest.StorerMock{
+ GetInstanceFunc: func(id string) (*models.Instance, error) {
+ return currentInstanceTestData, nil
+ },
+ UpdateInstanceFunc: func(id string, i *models.Instance) error {
+ return internalError
+ },
+ }
+
+ instance := &instance.Store{Host: host, Storer: mockedDataStore}
+ instance.Update(w, r)
+
+ So(w.Code, ShouldEqual, http.StatusForbidden)
+ So(len(mockedDataStore.GetInstanceCalls()), ShouldEqual, 1)
+ So(len(mockedDataStore.UpdateInstanceCalls()), ShouldEqual, 0)
+ })
+}
+
+func TestUpdateCompletedInstanceToCompletedReturnsForbidden(t *testing.T) {
t.Parallel()
-	Convey("update to an instance returns an internal error", t, func() {
+	Convey("Given a 'completed' instance, when we update to 'completed' then we get a forbidden error", t, func() {
body := strings.NewReader(`{"state":"completed"}`)
@@ -381,10 +416,9 @@ func TestUpdateInstanceReturnsInternalError(t *testing.T) {
instance := &instance.Store{Host: host, Storer: mockedDataStore}
instance.Update(w, r)
- So(w.Code, ShouldEqual, http.StatusInternalServerError)
+ So(w.Code, ShouldEqual, http.StatusForbidden)
So(len(mockedDataStore.GetInstanceCalls()), ShouldEqual, 1)
- So(len(mockedDataStore.UpdateInstanceCalls()), ShouldEqual, 1)
- So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 0)
+ So(len(mockedDataStore.UpdateInstanceCalls()), ShouldEqual, 0)
})
}
@@ -446,3 +480,122 @@ func TestInsertedObservationsReturnsNotFound(t *testing.T) {
So(len(mockedDataStore.UpdateObservationInsertedCalls()), ShouldEqual, 1)
})
}
+
+func TestStore_UpdateImportTask_UpdateImportObservations(t *testing.T) {
+
+ t.Parallel()
+ Convey("update to an import task returns http 200 response if no errors occur", t, func() {
+ body := strings.NewReader(`{"import_observations":{"state":"completed"}}`)
+ r := createRequestWithToken("PUT", "http://localhost:21800/instances/123/import_tasks", body)
+ w := httptest.NewRecorder()
+
+ mockedDataStore := &storetest.StorerMock{
+ UpdateImportObservationsTaskStateFunc: func(id string, state string) error {
+ return nil
+ },
+ }
+
+ instance := &instance.Store{Host: host, Storer: mockedDataStore}
+
+ instance.UpdateImportTask(w, r)
+
+ So(w.Code, ShouldEqual, http.StatusOK)
+ So(len(mockedDataStore.UpdateImportObservationsTaskStateCalls()), ShouldEqual, 1)
+ So(len(mockedDataStore.UpdateBuildHierarchyTaskStateCalls()), ShouldEqual, 0)
+ })
+}
+
+func TestStore_UpdateImportTask_UpdateImportObservations_InvalidState(t *testing.T) {
+
+ t.Parallel()
+ Convey("update to an import task with an invalid state returns http 400 response", t, func() {
+ body := strings.NewReader(`{"import_observations":{"state":"notvalid"}}`)
+ r := createRequestWithToken("PUT", "http://localhost:21800/instances/123/import_tasks", body)
+ w := httptest.NewRecorder()
+
+ mockedDataStore := &storetest.StorerMock{
+ UpdateImportObservationsTaskStateFunc: func(id string, state string) error {
+ return nil
+ },
+ }
+
+ instance := &instance.Store{Host: host, Storer: mockedDataStore}
+
+ instance.UpdateImportTask(w, r)
+
+ So(w.Code, ShouldEqual, http.StatusBadRequest)
+ So(len(mockedDataStore.UpdateImportObservationsTaskStateCalls()), ShouldEqual, 0)
+ So(len(mockedDataStore.UpdateBuildHierarchyTaskStateCalls()), ShouldEqual, 0)
+ })
+}
+
+func TestStore_UpdateImportTask_UpdateBuildHierarchyTask_InvalidState(t *testing.T) {
+
+ t.Parallel()
+ Convey("update to an import task with an invalid state returns http 400 response", t, func() {
+ body := strings.NewReader(`{"build_hierarchies":[{"state":"notvalid"}]}`)
+ r := createRequestWithToken("PUT", "http://localhost:21800/instances/123/import_tasks", body)
+ w := httptest.NewRecorder()
+
+ mockedDataStore := &storetest.StorerMock{
+ UpdateBuildHierarchyTaskStateFunc: func(id string, dimension string, state string) error {
+ return nil
+ },
+ }
+
+ instance := &instance.Store{Host: host, Storer: mockedDataStore}
+
+ instance.UpdateImportTask(w, r)
+
+ So(w.Code, ShouldEqual, http.StatusBadRequest)
+ So(len(mockedDataStore.UpdateImportObservationsTaskStateCalls()), ShouldEqual, 0)
+ So(len(mockedDataStore.UpdateBuildHierarchyTaskStateCalls()), ShouldEqual, 0)
+ })
+}
+
+func TestStore_UpdateImportTask_UpdateBuildHierarchyTask(t *testing.T) {
+
+ t.Parallel()
+ Convey("update to an import task returns http 200 response if no errors occur", t, func() {
+ body := strings.NewReader(`{"build_hierarchies":[{"state":"completed"}]}`)
+ r := createRequestWithToken("PUT", "http://localhost:21800/instances/123/import_tasks", body)
+ w := httptest.NewRecorder()
+
+ mockedDataStore := &storetest.StorerMock{
+ UpdateBuildHierarchyTaskStateFunc: func(id string, dimension string, state string) error {
+ return nil
+ },
+ }
+
+ instance := &instance.Store{Host: host, Storer: mockedDataStore}
+
+ instance.UpdateImportTask(w, r)
+
+ So(w.Code, ShouldEqual, http.StatusOK)
+ So(len(mockedDataStore.UpdateImportObservationsTaskStateCalls()), ShouldEqual, 0)
+ So(len(mockedDataStore.UpdateBuildHierarchyTaskStateCalls()), ShouldEqual, 1)
+ })
+}
+
+func TestStore_UpdateImportTask_ReturnsInternalError(t *testing.T) {
+
+ t.Parallel()
+ Convey("update to an import task returns an internal error", t, func() {
+ body := strings.NewReader(`{"import_observations":{"state":"completed"}}`)
+ r := createRequestWithToken("PUT", "http://localhost:21800/instances/123/import_tasks", body)
+ w := httptest.NewRecorder()
+
+ mockedDataStore := &storetest.StorerMock{
+ UpdateImportObservationsTaskStateFunc: func(id string, state string) error {
+ return internalError
+ },
+ }
+
+ instance := &instance.Store{Host: host, Storer: mockedDataStore}
+
+ instance.UpdateImportTask(w, r)
+
+ So(w.Code, ShouldEqual, http.StatusInternalServerError)
+ So(len(mockedDataStore.UpdateImportObservationsTaskStateCalls()), ShouldEqual, 1)
+ })
+}
diff --git a/instance/instance_internal_test.go b/instance/instance_internal_test.go
index 19201848..dc6f9c72 100644
--- a/instance/instance_internal_test.go
+++ b/instance/instance_internal_test.go
@@ -98,3 +98,29 @@ func TestUnmarshalInstance(t *testing.T) {
So(instance.Links.Job.ID, ShouldEqual, "123-456")
})
}
+
+func TestUnmarshalImportTaskWithBadReader(t *testing.T) {
+ Convey("Create an import task with an invalid reader", t, func() {
+ task, err := unmarshalImportTasks(Reader{})
+ So(task, ShouldBeNil)
+ So(err.Error(), ShouldEqual, "failed to read message body")
+ })
+}
+
+func TestUnmarshalImportTaskWithInvalidJson(t *testing.T) {
+ Convey("Create an import observation task with invalid json", t, func() {
+ task, err := unmarshalImportTasks(strings.NewReader("{ "))
+ So(task, ShouldBeNil)
+ So(err.Error(), ShouldContainSubstring, "failed to parse json body")
+ })
+}
+
+func TestUnmarshalImportTask(t *testing.T) {
+ Convey("Create an import observation task with valid json", t, func() {
+ task, err := unmarshalImportTasks(strings.NewReader(`{"import_observations":{"state":"completed"}}`))
+ So(err, ShouldBeNil)
+ So(task, ShouldNotBeNil)
+ So(task.ImportObservations, ShouldNotBeNil)
+ So(task.ImportObservations.State, ShouldEqual, "completed")
+ })
+}
diff --git a/main.go b/main.go
index ae3441ee..fe3f3115 100644
--- a/main.go
+++ b/main.go
@@ -9,11 +9,17 @@ import (
"github.com/ONSdigital/dp-dataset-api/api"
"github.com/ONSdigital/dp-dataset-api/config"
+ "github.com/ONSdigital/dp-dataset-api/download"
"github.com/ONSdigital/dp-dataset-api/mongo"
+ "github.com/ONSdigital/dp-dataset-api/schema"
"github.com/ONSdigital/dp-dataset-api/store"
+
+ "github.com/ONSdigital/go-ns/kafka"
+
"github.com/ONSdigital/dp-dataset-api/url"
"github.com/ONSdigital/go-ns/log"
mongoclosure "github.com/ONSdigital/go-ns/mongo"
+ "github.com/pkg/errors"
)
func main() {
@@ -28,6 +34,16 @@ func main() {
os.Exit(1)
}
+ sanitized := *cfg
+ sanitized.SecretKey = ""
+ log.Info("config on startup", log.Data{"config": sanitized})
+
+ generateDownloadsProducer, err := kafka.NewProducer(cfg.KafkaAddr, cfg.GenerateDownloadsTopic, 0)
+ if err != nil {
+		log.Error(errors.Wrap(err, "error creating kafka producer"), nil)
+ os.Exit(1)
+ }
+
mongo := &mongo.Mongo{
CodeListURL: cfg.CodeListAPIURL,
Collection: cfg.MongoConfig.Collection,
@@ -48,24 +64,35 @@ func main() {
"bind_address": cfg.BindAddr,
})
+ store := store.DataStore{Backend: mongo}
+
+ downloadGenerator := &download.Generator{
+ Producer: generateDownloadsProducer,
+ Marshaller: schema.GenerateDownloadsEvent,
+ }
+
apiErrors := make(chan error, 1)
urlBuilder := url.NewBuilder(cfg.WebsiteURL)
- api.CreateDatasetAPI(cfg.DatasetAPIURL, cfg.BindAddr, cfg.SecretKey, store.DataStore{Backend: mongo}, urlBuilder, apiErrors, cfg.HealthCheckTimeout)
+ api.CreateDatasetAPI(cfg.DatasetAPIURL, cfg.BindAddr, cfg.SecretKey, store, urlBuilder, apiErrors, downloadGenerator, cfg.HealthCheckTimeout)
// Gracefully shutdown the application closing any open resources.
gracefulShutdown := func() {
log.Info(fmt.Sprintf("shutdown with timeout: %s", cfg.GracefulShutdownTimeout), nil)
ctx, cancel := context.WithTimeout(context.Background(), cfg.GracefulShutdownTimeout)
+ // stop any incoming requests before closing any outbound connections
api.Close(ctx)
- // mongo.Close() may use all remaining time in the context - do this last!
if err = mongoclosure.Close(ctx, session); err != nil {
log.Error(err, nil)
}
+ if err := generateDownloadsProducer.Close(ctx); err != nil {
+ log.Error(errors.Wrap(err, "error while attempting to shutdown kafka producer"), nil)
+ }
+
log.Info("shutdown complete", nil)
cancel()
diff --git a/mocks/generate_downloads_mocks.go b/mocks/generate_downloads_mocks.go
new file mode 100755
index 00000000..4495b32b
--- /dev/null
+++ b/mocks/generate_downloads_mocks.go
@@ -0,0 +1,129 @@
+// Code generated by moq; DO NOT EDIT
+// github.com/matryer/moq
+
+package mocks
+
+import (
+ "sync"
+)
+
+var (
+ lockKafkaProducerMockOutput sync.RWMutex
+)
+
+// KafkaProducerMock is a mock implementation of KafkaProducer.
+//
+// func TestSomethingThatUsesKafkaProducer(t *testing.T) {
+//
+// // make and configure a mocked KafkaProducer
+// mockedKafkaProducer := &KafkaProducerMock{
+// OutputFunc: func() chan []byte {
+// panic("TODO: mock out the Output method")
+// },
+// }
+//
+// // TODO: use mockedKafkaProducer in code that requires KafkaProducer
+// // and then make assertions.
+//
+// }
+type KafkaProducerMock struct {
+ // OutputFunc mocks the Output method.
+ OutputFunc func() chan []byte
+
+ // calls tracks calls to the methods.
+ calls struct {
+ // Output holds details about calls to the Output method.
+ Output []struct {
+ }
+ }
+}
+
+// Output calls OutputFunc.
+func (mock *KafkaProducerMock) Output() chan []byte {
+ if mock.OutputFunc == nil {
+ panic("moq: KafkaProducerMock.OutputFunc is nil but KafkaProducer.Output was just called")
+ }
+ callInfo := struct {
+ }{}
+ lockKafkaProducerMockOutput.Lock()
+ mock.calls.Output = append(mock.calls.Output, callInfo)
+ lockKafkaProducerMockOutput.Unlock()
+ return mock.OutputFunc()
+}
+
+// OutputCalls gets all the calls that were made to Output.
+// Check the length with:
+// len(mockedKafkaProducer.OutputCalls())
+func (mock *KafkaProducerMock) OutputCalls() []struct {
+} {
+ var calls []struct {
+ }
+ lockKafkaProducerMockOutput.RLock()
+ calls = mock.calls.Output
+ lockKafkaProducerMockOutput.RUnlock()
+ return calls
+}
+
+var (
+ lockGenerateDownloadsEventMockMarshal sync.RWMutex
+)
+
+// GenerateDownloadsEventMock is a mock implementation of GenerateDownloadsEvent.
+//
+// func TestSomethingThatUsesGenerateDownloadsEvent(t *testing.T) {
+//
+// // make and configure a mocked GenerateDownloadsEvent
+// mockedGenerateDownloadsEvent := &GenerateDownloadsEventMock{
+// MarshalFunc: func(s interface{}) ([]byte, error) {
+// panic("TODO: mock out the Marshal method")
+// },
+// }
+//
+// // TODO: use mockedGenerateDownloadsEvent in code that requires GenerateDownloadsEvent
+// // and then make assertions.
+//
+// }
+type GenerateDownloadsEventMock struct {
+ // MarshalFunc mocks the Marshal method.
+ MarshalFunc func(s interface{}) ([]byte, error)
+
+ // calls tracks calls to the methods.
+ calls struct {
+ // Marshal holds details about calls to the Marshal method.
+ Marshal []struct {
+ // S is the s argument value.
+ S interface{}
+ }
+ }
+}
+
+// Marshal calls MarshalFunc.
+func (mock *GenerateDownloadsEventMock) Marshal(s interface{}) ([]byte, error) {
+ if mock.MarshalFunc == nil {
+ panic("moq: GenerateDownloadsEventMock.MarshalFunc is nil but GenerateDownloadsEvent.Marshal was just called")
+ }
+ callInfo := struct {
+ S interface{}
+ }{
+ S: s,
+ }
+ lockGenerateDownloadsEventMockMarshal.Lock()
+ mock.calls.Marshal = append(mock.calls.Marshal, callInfo)
+ lockGenerateDownloadsEventMockMarshal.Unlock()
+ return mock.MarshalFunc(s)
+}
+
+// MarshalCalls gets all the calls that were made to Marshal.
+// Check the length with:
+// len(mockedGenerateDownloadsEvent.MarshalCalls())
+func (mock *GenerateDownloadsEventMock) MarshalCalls() []struct {
+ S interface{}
+} {
+ var calls []struct {
+ S interface{}
+ }
+ lockGenerateDownloadsEventMockMarshal.RLock()
+ calls = mock.calls.Marshal
+ lockGenerateDownloadsEventMockMarshal.RUnlock()
+ return calls
+}
diff --git a/mocks/mocks.go b/mocks/mocks.go
new file mode 100755
index 00000000..3d18545b
--- /dev/null
+++ b/mocks/mocks.go
@@ -0,0 +1,90 @@
+// Code generated by moq; DO NOT EDIT
+// github.com/matryer/moq
+
+package mocks
+
+import (
+ "sync"
+)
+
+var (
+ lockDownloadsGeneratorMockGenerate sync.RWMutex
+)
+
+// DownloadsGeneratorMock is a mock implementation of DownloadsGenerator.
+//
+// func TestSomethingThatUsesDownloadsGenerator(t *testing.T) {
+//
+// // make and configure a mocked DownloadsGenerator
+// mockedDownloadsGenerator := &DownloadsGeneratorMock{
+// GenerateFunc: func(datasetID string, instanceID string, edition string, version string) error {
+// panic("TODO: mock out the Generate method")
+// },
+// }
+//
+// // TODO: use mockedDownloadsGenerator in code that requires DownloadsGenerator
+// // and then make assertions.
+//
+// }
+type DownloadsGeneratorMock struct {
+ // GenerateFunc mocks the Generate method.
+ GenerateFunc func(datasetID string, instanceID string, edition string, version string) error
+
+ // calls tracks calls to the methods.
+ calls struct {
+ // Generate holds details about calls to the Generate method.
+ Generate []struct {
+ // DatasetID is the datasetID argument value.
+ DatasetID string
+ // InstanceID is the instanceID argument value.
+ InstanceID string
+ // Edition is the edition argument value.
+ Edition string
+ // Version is the version argument value.
+ Version string
+ }
+ }
+}
+
+// Generate calls GenerateFunc.
+func (mock *DownloadsGeneratorMock) Generate(datasetID string, instanceID string, edition string, version string) error {
+ if mock.GenerateFunc == nil {
+ panic("moq: DownloadsGeneratorMock.GenerateFunc is nil but DownloadsGenerator.Generate was just called")
+ }
+ callInfo := struct {
+ DatasetID string
+ InstanceID string
+ Edition string
+ Version string
+ }{
+ DatasetID: datasetID,
+ InstanceID: instanceID,
+ Edition: edition,
+ Version: version,
+ }
+ lockDownloadsGeneratorMockGenerate.Lock()
+ mock.calls.Generate = append(mock.calls.Generate, callInfo)
+ lockDownloadsGeneratorMockGenerate.Unlock()
+ return mock.GenerateFunc(datasetID, instanceID, edition, version)
+}
+
+// GenerateCalls gets all the calls that were made to Generate.
+// Check the length with:
+// len(mockedDownloadsGenerator.GenerateCalls())
+func (mock *DownloadsGeneratorMock) GenerateCalls() []struct {
+ DatasetID string
+ InstanceID string
+ Edition string
+ Version string
+} {
+ var calls []struct {
+ DatasetID string
+ InstanceID string
+ Edition string
+ Version string
+ }
+ lockDownloadsGeneratorMockGenerate.RLock()
+ calls = mock.calls.Generate
+ lockDownloadsGeneratorMockGenerate.RUnlock()
+ return calls
+}
diff --git a/models/dataset.go b/models/dataset.go
index cabdba8b..df05bd3b 100644
--- a/models/dataset.go
+++ b/models/dataset.go
@@ -2,13 +2,14 @@ package models
import (
"encoding/json"
- "errors"
"fmt"
"io"
"io/ioutil"
+ "strconv"
"time"
- uuid "github.com/satori/go.uuid"
+ "github.com/pkg/errors"
+ "github.com/satori/go.uuid"
)
// DatasetResults represents a structure for a list of datasets
@@ -157,7 +158,9 @@ type DownloadList struct {
// DownloadObject represents information on the downloadable file
type DownloadObject struct {
- URL string `bson:"url,omitempty" json:"url,omitempty"`
+ URL string `bson:"url,omitempty" json:"url,omitempty"`
+	// TODO: size is in bytes and should probably be an int64 rather than a string;
+	// this will have to change for several services (filter API, exporter services and web)
Size string `bson:"size,omitempty" json:"size,omitempty"`
}
@@ -199,7 +202,6 @@ func CreateDataset(reader io.Reader) (*Dataset, error) {
if err != nil {
return nil, errors.New("Failed to parse json body")
}
-
return &dataset, nil
}
@@ -222,6 +224,21 @@ func CreateVersion(reader io.Reader) (*Version, error) {
return &version, nil
}
+// CreateDownloadList manages the creation of a list of downloadable items from a reader
+func CreateDownloadList(reader io.Reader) (*DownloadList, error) {
+ bytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to read response body")
+ }
+
+ var downloadList DownloadList
+ if err := json.Unmarshal(bytes, &downloadList); err != nil {
+ return nil, errors.Wrap(err, "failed to parse json to downloadList")
+ }
+
+ return &downloadList, nil
+}
+
// CreateContact manages the creation of a contact from a reader
func CreateContact(reader io.Reader) (*Contact, error) {
bytes, err := ioutil.ReadAll(reader)
@@ -259,13 +276,44 @@ func ValidateVersion(version *Version) error {
}
var missingFields []string
+ var invalidFields []string
if version.ReleaseDate == "" {
missingFields = append(missingFields, "release_date")
}
+ if version.Downloads != nil {
+ if version.Downloads.XLS != nil {
+ if version.Downloads.XLS.URL == "" {
+ missingFields = append(missingFields, "Downloads.XLS.URL")
+ }
+ if version.Downloads.XLS.Size == "" {
+ missingFields = append(missingFields, "Downloads.XLS.Size")
+ }
+ if _, err := strconv.Atoi(version.Downloads.XLS.Size); err != nil {
+ invalidFields = append(invalidFields, "Downloads.XLS.Size not a number")
+ }
+ }
+
+ if version.Downloads.CSV != nil {
+ if version.Downloads.CSV.URL == "" {
+ missingFields = append(missingFields, "Downloads.CSV.URL")
+ }
+ if version.Downloads.CSV.Size == "" {
+ missingFields = append(missingFields, "Downloads.CSV.Size")
+ }
+ if _, err := strconv.Atoi(version.Downloads.CSV.Size); err != nil {
+ invalidFields = append(invalidFields, "Downloads.CSV.Size not a number")
+ }
+ }
+ }
+
if missingFields != nil {
- return fmt.Errorf("Missing mandatory fields: %v", missingFields)
+ return fmt.Errorf("missing mandatory fields: %v", missingFields)
+ }
+
+ if invalidFields != nil {
+ return fmt.Errorf("invalid fields: %v", invalidFields)
}
return nil
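A small sketch of the stricter download validation above (URL and size values are illustrative; sizes are expected to be numeric byte counts held as strings):

```go
package main

import (
	"fmt"

	"github.com/ONSdigital/dp-dataset-api/models"
)

func main() {
	version := &models.Version{
		ReleaseDate: "2017-10-12",
		State:       models.EditionConfirmedState,
		Downloads: &models.DownloadList{
			CSV: &models.DownloadObject{URL: "https://download/cpih01.csv", Size: "2048"},
		},
	}

	// URL present and Size numeric, so this passes.
	fmt.Println(models.ValidateVersion(version)) // <nil>

	// A non-numeric size is rejected.
	version.Downloads.CSV.Size = "25mb"
	fmt.Println(models.ValidateVersion(version)) // invalid fields: [Downloads.CSV.Size not a number]
}
```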
diff --git a/models/dataset_test.go b/models/dataset_test.go
index 7fe4ffb2..ff0662e2 100644
--- a/models/dataset_test.go
+++ b/models/dataset_test.go
@@ -3,11 +3,13 @@ package models
import (
"bytes"
"encoding/json"
- "errors"
+ "fmt"
"os"
+ "reflect"
"testing"
"github.com/ONSdigital/go-ns/log"
+ "github.com/pkg/errors"
. "github.com/smartystreets/goconvey/convey"
)
@@ -87,7 +89,7 @@ func TestCreateDataset(t *testing.T) {
version, err := CreateDataset(r)
So(version, ShouldBeNil)
So(err, ShouldNotBeNil)
- So(err, ShouldResemble, errors.New("Failed to parse json body"))
+ So(err.Error(), ShouldResemble, errors.New("Failed to parse json body").Error())
})
}
@@ -127,7 +129,7 @@ func TestCreateVersion(t *testing.T) {
version, err := CreateVersion(r)
So(version, ShouldBeNil)
So(err, ShouldNotBeNil)
- So(err, ShouldResemble, errors.New("Failed to parse json body"))
+ So(err.Error(), ShouldResemble, errors.New("Failed to parse json body").Error())
})
}
@@ -158,14 +160,14 @@ func TestValidateVersion(t *testing.T) {
err := ValidateVersion(&Version{State: SubmittedState})
So(err, ShouldNotBeNil)
- So(err, ShouldResemble, errors.New("Incorrect state, can be one of the following: edition-confirmed, associated or published"))
+ So(err.Error(), ShouldResemble, errors.New("Incorrect state, can be one of the following: edition-confirmed, associated or published").Error())
})
- Convey("when mandatorey fields are missing from version document when state is set to created", func() {
+ Convey("when mandatory fields are missing from version document when state is set to created", func() {
err := ValidateVersion(&Version{State: EditionConfirmedState})
So(err, ShouldNotBeNil)
- So(err, ShouldResemble, errors.New("Missing mandatory fields: [release_date]"))
+ So(err.Error(), ShouldResemble, errors.New("missing mandatory fields: [release_date]").Error())
})
Convey("when the version state is published but is missing collection_id", func() {
@@ -176,7 +178,61 @@ func TestValidateVersion(t *testing.T) {
err := ValidateVersion(version)
So(err, ShouldNotBeNil)
- So(err, ShouldResemble, errors.New("Missing collection_id for association between version and a collection"))
+ So(err.Error(), ShouldResemble, errors.New("Missing collection_id for association between version and a collection").Error())
+ })
+
+ Convey("when version downloads are invalid", func() {
+ v := &Version{ReleaseDate: "Today", State: EditionConfirmedState}
+
+ v.Downloads = &DownloadList{XLS: &DownloadObject{URL: "", Size: "2"}}
+ assertVersionDownloadError(fmt.Errorf("missing mandatory fields: %v", []string{"Downloads.XLS.URL"}), v)
+
+ v.Downloads = &DownloadList{CSV: &DownloadObject{URL: "", Size: "2"}}
+ assertVersionDownloadError(fmt.Errorf("missing mandatory fields: %v", []string{"Downloads.CSV.URL"}), v)
+
+ v.Downloads = &DownloadList{XLS: &DownloadObject{URL: "/", Size: ""}}
+ assertVersionDownloadError(fmt.Errorf("missing mandatory fields: %v", []string{"Downloads.XLS.Size"}), v)
+
+ v.Downloads = &DownloadList{CSV: &DownloadObject{URL: "/", Size: ""}}
+ assertVersionDownloadError(fmt.Errorf("missing mandatory fields: %v", []string{"Downloads.CSV.Size"}), v)
+
+ v.Downloads = &DownloadList{XLS: &DownloadObject{URL: "/", Size: "bob"}}
+ assertVersionDownloadError(fmt.Errorf("invalid fields: %v", []string{"Downloads.XLS.Size not a number"}), v)
+
+ v.Downloads = &DownloadList{CSV: &DownloadObject{URL: "/", Size: "bob"}}
+ assertVersionDownloadError(fmt.Errorf("invalid fields: %v", []string{"Downloads.CSV.Size not a number"}), v)
})
})
}
+
+func assertVersionDownloadError(expected error, v *Version) {
+ err := ValidateVersion(v)
+ So(err, ShouldNotBeNil)
+ So(err, ShouldResemble, expected)
+}
+
+func TestCreateDownloadList(t *testing.T) {
+ Convey("invalid input bytes return the expected error", t, func() {
+ reader := bytes.NewReader([]byte("hello"))
+ dl, err := CreateDownloadList(reader)
+ So(dl, ShouldBeNil)
+ So(reflect.TypeOf(errors.Cause(err)), ShouldEqual, reflect.TypeOf(&json.SyntaxError{}))
+ })
+
+ Convey("valid input returns the expected value", t, func() {
+ expected := &DownloadList{
+ XLS: &DownloadObject{
+ Size: "1",
+ URL: "2",
+ },
+ }
+
+ input, _ := json.Marshal(expected)
+ reader := bytes.NewReader(input)
+
+ dl, err := CreateDownloadList(reader)
+ So(err, ShouldBeNil)
+ So(dl, ShouldResemble, expected)
+ })
+
+}
diff --git a/models/instance.go b/models/instance.go
index f5b307e7..7b239b05 100644
--- a/models/instance.go
+++ b/models/instance.go
@@ -8,23 +8,42 @@ import (
// Instance which presents a single dataset being imported
type Instance struct {
- Alerts *[]Alert `bson:"alerts,omitempty" json:"alerts,omitempty"`
- InstanceID string `bson:"id,omitempty" json:"id,omitempty"`
- CollectionID string `bson:"collection_id,omitempty" json:"collection_id,omitempty"`
- Dimensions []CodeList `bson:"dimensions,omitempty" json:"dimensions,omitempty"`
- Downloads *DownloadList `bson:"downloads,omitempty" json:"downloads,omitempty"`
- Edition string `bson:"edition,omitempty" json:"edition,omitempty"`
- Events *[]Event `bson:"events,omitempty" json:"events,omitempty"`
- Headers *[]string `bson:"headers,omitempty" json:"headers,omitempty"`
- InsertedObservations *int `bson:"total_inserted_observations,omitempty" json:"total_inserted_observations,omitempty"`
- LatestChanges *[]LatestChange `bson:"latest_changes,omitempty" json:"latest_changes,omitempty"`
- Links *InstanceLinks `bson:"links,omitempty" json:"links,omitempty"`
- ReleaseDate string `bson:"release_date,omitempty" json:"release_date,omitempty"`
- State string `bson:"state,omitempty" json:"state,omitempty"`
- Temporal *[]TemporalFrequency `bson:"temporal,omitempty" json:"temporal,omitempty"`
- TotalObservations *int `bson:"total_observations,omitempty" json:"total_observations,omitempty"`
- Version int `bson:"version,omitempty" json:"version,omitempty"`
- LastUpdated time.Time `bson:"last_updated,omitempty" json:"last_updated,omitempty"`
+ Alerts *[]Alert `bson:"alerts,omitempty" json:"alerts,omitempty"`
+ InstanceID string `bson:"id,omitempty" json:"id,omitempty"`
+ CollectionID string `bson:"collection_id,omitempty" json:"collection_id,omitempty"`
+ Dimensions []CodeList `bson:"dimensions,omitempty" json:"dimensions,omitempty"`
+ Downloads *DownloadList `bson:"downloads,omitempty" json:"downloads,omitempty"`
+ Edition string `bson:"edition,omitempty" json:"edition,omitempty"`
+ Events *[]Event `bson:"events,omitempty" json:"events,omitempty"`
+ Headers *[]string `bson:"headers,omitempty" json:"headers,omitempty"`
+ LatestChanges *[]LatestChange `bson:"latest_changes,omitempty" json:"latest_changes,omitempty"`
+ Links *InstanceLinks `bson:"links,omitempty" json:"links,omitempty"`
+ ReleaseDate string `bson:"release_date,omitempty" json:"release_date,omitempty"`
+ State string `bson:"state,omitempty" json:"state,omitempty"`
+ Temporal *[]TemporalFrequency `bson:"temporal,omitempty" json:"temporal,omitempty"`
+ TotalObservations *int `bson:"total_observations,omitempty" json:"total_observations,omitempty"`
+ Version int `bson:"version,omitempty" json:"version,omitempty"`
+ LastUpdated time.Time `bson:"last_updated,omitempty" json:"last_updated,omitempty"`
+ ImportTasks *InstanceImportTasks `bson:"import_tasks,omitempty" json:"import_tasks"`
+}
+
+// InstanceImportTasks represents the import tasks (observation import and hierarchy builds) for an instance
+type InstanceImportTasks struct {
+ ImportObservations *ImportObservationsTask `bson:"import_observations,omitempty" json:"import_observations"`
+ BuildHierarchyTasks []*BuildHierarchyTask `bson:"build_hierarchies,omitempty" json:"build_hierarchies"`
+}
+
+// ImportObservationsTask represents the task of importing instance observation data into the database.
+type ImportObservationsTask struct {
+ State string `bson:"state,omitempty" json:"state,omitempty"`
+ InsertedObservations int `bson:"total_inserted_observations" json:"total_inserted_observations,omitempty"`
+}
+
+// BuildHierarchyTask represents a task of importing a single hierarchy.
+type BuildHierarchyTask struct {
+ State string `bson:"state,omitempty" json:"state,omitempty"`
+ DimensionName string `bson:"dimension_name,omitempty" json:"dimension_name,omitempty"`
+ CodeListID string `bson:"code_list_id,omitempty" json:"code_list_id,omitempty"`
}
// CodeList for a dimension within an instance
@@ -68,7 +87,7 @@ type InstanceResults struct {
// Validate the event structure
func (e *Event) Validate() error {
if e.Message == "" || e.MessageOffset == "" || e.Time == nil || e.Type == "" {
- return errors.New("Missing properties")
+ return errors.New("missing properties")
}
return nil
}
@@ -93,7 +112,7 @@ func ValidateStateFilter(filterList []string) error {
}
if invalidFilterStateValues != nil {
- err := fmt.Errorf("Bad request - invalid filter state values: %v", invalidFilterStateValues)
+ err := fmt.Errorf("bad request - invalid filter state values: %v", invalidFilterStateValues)
return err
}
@@ -109,7 +128,7 @@ func ValidateInstanceState(state string) error {
}
if invalidInstantStateValues != nil {
- err := fmt.Errorf("Bad request - invalid filter state values: %v", invalidInstantStateValues)
+ err := fmt.Errorf("bad request - invalid filter state values: %v", invalidInstantStateValues)
return err
}
diff --git a/models/instance_test.go b/models/instance_test.go
index 46f40340..dd250b2f 100644
--- a/models/instance_test.go
+++ b/models/instance_test.go
@@ -58,14 +58,14 @@ func TestValidateStateFilter(t *testing.T) {
err := ValidateStateFilter([]string{"foo"})
So(err, ShouldNotBeNil)
- So(err, ShouldResemble, errors.New("Bad request - invalid filter state values: [foo]"))
+ So(err, ShouldResemble, errors.New("bad request - invalid filter state values: [foo]"))
})
Convey("when the filter list contains more than one invalid state", func() {
err := ValidateStateFilter([]string{"foo", "bar"})
So(err, ShouldNotBeNil)
- So(err, ShouldResemble, errors.New("Bad request - invalid filter state values: [foo bar]"))
+ So(err, ShouldResemble, errors.New("bad request - invalid filter state values: [foo bar]"))
})
})
}
diff --git a/models/metadata.go b/models/metadata.go
index 7bedb483..19399234 100644
--- a/models/metadata.go
+++ b/models/metadata.go
@@ -1,8 +1,9 @@
package models
import (
- "github.com/ONSdigital/dp-dataset-api/url"
"strconv"
+
+ "github.com/ONSdigital/dp-dataset-api/url"
)
// Metadata represents information (metadata) relevant to a version
@@ -33,6 +34,7 @@ type Metadata struct {
URI string `json:"uri,omitempty"`
}
+// MetadataLinks represents the links object for the metadata relevant to a version
type MetadataLinks struct {
AccessRights *LinkObject `json:"access_rights,omitempty"`
Self *LinkObject `json:"self,omitempty"`
@@ -41,6 +43,7 @@ type MetadataLinks struct {
WebsiteVersion *LinkObject `json:"website_version,omitempty"`
}
+// CreateMetaDataDoc manages the creation of metadata across dataset and version docs
func CreateMetaDataDoc(datasetDoc *Dataset, versionDoc *Version, urlBuilder *url.Builder) *Metadata {
metaDataDoc := &Metadata{
Alerts: versionDoc.Alerts,
@@ -104,11 +107,11 @@ func getDistribution(downloads *DownloadList) []string {
distribution := []string{"json"}
if downloads != nil {
- if downloads.CSV != nil || downloads.CSV.URL != "" {
+ if downloads.CSV != nil && downloads.CSV.URL != "" {
distribution = append(distribution, "csv")
}
- if downloads.XLS != nil || downloads.XLS.URL != "" {
+ if downloads.XLS != nil && downloads.XLS.URL != "" {
distribution = append(distribution, "xls")
}
}
diff --git a/models/test_data.go b/models/test_data.go
index 6e49e77e..d2ddbda9 100644
--- a/models/test_data.go
+++ b/models/test_data.go
@@ -125,11 +125,11 @@ var dimension = CodeList{
var downloads = DownloadList{
CSV: &DownloadObject{
URL: "https://www.aws/123",
- Size: "25mb",
+ Size: "25",
},
XLS: &DownloadObject{
URL: "https://www.aws/1234",
- Size: "45mb",
+ Size: "45",
},
}
diff --git a/mongo/dataset_store.go b/mongo/dataset_store.go
index 6430bc1c..bc1aac51 100644
--- a/mongo/dataset_store.go
+++ b/mongo/dataset_store.go
@@ -477,9 +477,11 @@ func createVersionUpdateQuery(version *models.Version) bson.M {
updates["release_date"] = version.ReleaseDate
}
- if version.Links.Spatial != nil {
- if version.Links.Spatial.HRef != "" {
- updates["links.spatial.href"] = version.Links.Spatial.HRef
+ if version.Links != nil {
+ if version.Links.Spatial != nil {
+ if version.Links.Spatial.HRef != "" {
+ updates["links.spatial.href"] = version.Links.Spatial.HRef
+ }
}
}
@@ -491,6 +493,10 @@ func createVersionUpdateQuery(version *models.Version) bson.M {
updates["temporal"] = version.Temporal
}
+ if version.Downloads != nil {
+ updates["downloads"] = version.Downloads
+ }
+
return updates
}
diff --git a/mongo/instance_store.go b/mongo/instance_store.go
index 912953fb..3d47d163 100644
--- a/mongo/instance_store.go
+++ b/mongo/instance_store.go
@@ -126,7 +126,9 @@ func (m *Mongo) UpdateObservationInserted(id string, observationInserted int64)
defer s.Close()
err := s.DB(m.Database).C(instanceCollection).Update(bson.M{"id": id},
- bson.M{"$inc": bson.M{"total_inserted_observations": observationInserted}, "$set": bson.M{"last_updated": time.Now().UTC()}})
+ bson.M{
+ "$inc": bson.M{"import_tasks.import_observations.total_inserted_observations": observationInserted},
+ "$set": bson.M{"last_updated": time.Now().UTC()}})
if err == mgo.ErrNotFound {
return errs.ErrInstanceNotFound
@@ -138,3 +140,44 @@ func (m *Mongo) UpdateObservationInserted(id string, observationInserted int64)
return nil
}
+
+// UpdateImportObservationsTaskState updates the state of the import observations task for an instance.
+func (m *Mongo) UpdateImportObservationsTaskState(id string, state string) error {
+ s := m.Session.Copy()
+ defer s.Close()
+
+ err := s.DB(m.Database).C(instanceCollection).Update(bson.M{"id": id},
+ bson.M{
+ "$set": bson.M{"import_tasks.import_observations.state": state},
+ "$currentDate": bson.M{"last_updated": true},
+ })
+
+ if err == mgo.ErrNotFound {
+ return errs.ErrInstanceNotFound
+ }
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// UpdateBuildHierarchyTaskState updates the state of a build hierarchy task.
+func (m *Mongo) UpdateBuildHierarchyTaskState(id, dimension, state string) (err error) {
+ s := m.Session.Copy()
+ defer s.Close()
+
+ selector := bson.M{
+ "id": id,
+ "import_tasks.build_hierarchies.dimension_name": dimension,
+ }
+
+ update := bson.M{
+ "$set": bson.M{"import_tasks.build_hierarchies.$.state": state},
+ "$currentDate": bson.M{"last_updated": true},
+ }
+
+ err = s.DB(m.Database).C(instanceCollection).Update(selector, update)
+ return
+}
diff --git a/schema/schema.go b/schema/schema.go
new file mode 100644
index 00000000..74e758d4
--- /dev/null
+++ b/schema/schema.go
@@ -0,0 +1,20 @@
+package schema
+
+import "github.com/ONSdigital/go-ns/avro"
+
+var generateDownloads = `{
+ "type": "record",
+ "name": "filter-output-submitted",
+ "fields": [
+ {"name": "filter_output_id", "type": "string", "default": ""},
+ {"name": "instance_id", "type": "string", "default": ""},
+ {"name": "dataset_id", "type": "string", "default": ""},
+ {"name": "edition", "type": "string", "default": ""},
+ {"name": "version", "type": "string", "default": ""}
+ ]
+}`
+
+// GenerateDownloadsEvent is the Avro schema for FilterOutputSubmitted messages.
+var GenerateDownloadsEvent = &avro.Schema{
+ Definition: generateDownloads,
+}
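A hedged sketch of marshalling an event with this schema directly (field values are illustrative; in practice the download.Generator above supplies the struct, including an empty filter_output_id):

```go
package main

import (
	"fmt"

	"github.com/ONSdigital/dp-dataset-api/schema"
)

func main() {
	// The struct's avro tags must line up with the field names in the schema.
	event := struct {
		FilterID   string `avro:"filter_output_id"`
		InstanceID string `avro:"instance_id"`
		DatasetID  string `avro:"dataset_id"`
		Edition    string `avro:"edition"`
		Version    string `avro:"version"`
	}{
		InstanceID: "instance-id-123",
		DatasetID:  "cpih01",
		Edition:    "time-series",
		Version:    "1",
	}

	avroBytes, err := schema.GenerateDownloadsEvent.Marshal(event)
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshalled %d avro bytes\n", len(avroBytes))
}
```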
diff --git a/store/datastore.go b/store/datastore.go
index 8b4e8cdf..3c335797 100644
--- a/store/datastore.go
+++ b/store/datastore.go
@@ -41,6 +41,8 @@ type Storer interface {
UpdateEdition(datasetID, edition string, latestVersion *models.Version) error
UpdateInstance(ID string, instance *models.Instance) error
UpdateObservationInserted(ID string, observationInserted int64) error
+ UpdateImportObservationsTaskState(id, state string) error
+ UpdateBuildHierarchyTaskState(id, dimension, state string) error
UpdateVersion(ID string, version *models.Version) error
UpsertContact(ID string, update interface{}) error
UpsertDataset(ID string, datasetDoc *models.DatasetUpdate) error
diff --git a/store/datastoretest/datastore.go b/store/datastoretest/datastore.go
index 3a2c69f3..3ccd3139 100755
--- a/store/datastoretest/datastore.go
+++ b/store/datastoretest/datastore.go
@@ -12,36 +12,38 @@ import (
)
var (
- lockStorerMockAddDimensionToInstance sync.RWMutex
- lockStorerMockAddEventToInstance sync.RWMutex
- lockStorerMockAddInstance sync.RWMutex
- lockStorerMockCheckDatasetExists sync.RWMutex
- lockStorerMockCheckEditionExists sync.RWMutex
- lockStorerMockGetDataset sync.RWMutex
- lockStorerMockGetDatasets sync.RWMutex
- lockStorerMockGetDimensionNodesFromInstance sync.RWMutex
- lockStorerMockGetDimensionOptions sync.RWMutex
- lockStorerMockGetDimensions sync.RWMutex
- lockStorerMockGetEdition sync.RWMutex
- lockStorerMockGetEditions sync.RWMutex
- lockStorerMockGetInstance sync.RWMutex
- lockStorerMockGetInstances sync.RWMutex
- lockStorerMockGetNextVersion sync.RWMutex
- lockStorerMockGetUniqueDimensionValues sync.RWMutex
- lockStorerMockGetVersion sync.RWMutex
- lockStorerMockGetVersions sync.RWMutex
- lockStorerMockPing sync.RWMutex
- lockStorerMockUpdateDataset sync.RWMutex
- lockStorerMockUpdateDatasetWithAssociation sync.RWMutex
- lockStorerMockUpdateDimensionNodeID sync.RWMutex
- lockStorerMockUpdateEdition sync.RWMutex
- lockStorerMockUpdateInstance sync.RWMutex
- lockStorerMockUpdateObservationInserted sync.RWMutex
- lockStorerMockUpdateVersion sync.RWMutex
- lockStorerMockUpsertContact sync.RWMutex
- lockStorerMockUpsertDataset sync.RWMutex
- lockStorerMockUpsertEdition sync.RWMutex
- lockStorerMockUpsertVersion sync.RWMutex
+ lockStorerMockAddDimensionToInstance sync.RWMutex
+ lockStorerMockAddEventToInstance sync.RWMutex
+ lockStorerMockAddInstance sync.RWMutex
+ lockStorerMockCheckDatasetExists sync.RWMutex
+ lockStorerMockCheckEditionExists sync.RWMutex
+ lockStorerMockGetDataset sync.RWMutex
+ lockStorerMockGetDatasets sync.RWMutex
+ lockStorerMockGetDimensionNodesFromInstance sync.RWMutex
+ lockStorerMockGetDimensionOptions sync.RWMutex
+ lockStorerMockGetDimensions sync.RWMutex
+ lockStorerMockGetEdition sync.RWMutex
+ lockStorerMockGetEditions sync.RWMutex
+ lockStorerMockGetInstance sync.RWMutex
+ lockStorerMockGetInstances sync.RWMutex
+ lockStorerMockGetNextVersion sync.RWMutex
+ lockStorerMockGetUniqueDimensionValues sync.RWMutex
+ lockStorerMockGetVersion sync.RWMutex
+ lockStorerMockGetVersions sync.RWMutex
+ lockStorerMockPing sync.RWMutex
+ lockStorerMockUpdateBuildHierarchyTaskState sync.RWMutex
+ lockStorerMockUpdateDataset sync.RWMutex
+ lockStorerMockUpdateDatasetWithAssociation sync.RWMutex
+ lockStorerMockUpdateDimensionNodeID sync.RWMutex
+ lockStorerMockUpdateEdition sync.RWMutex
+ lockStorerMockUpdateImportObservationsTaskState sync.RWMutex
+ lockStorerMockUpdateInstance sync.RWMutex
+ lockStorerMockUpdateObservationInserted sync.RWMutex
+ lockStorerMockUpdateVersion sync.RWMutex
+ lockStorerMockUpsertContact sync.RWMutex
+ lockStorerMockUpsertDataset sync.RWMutex
+ lockStorerMockUpsertEdition sync.RWMutex
+ lockStorerMockUpsertVersion sync.RWMutex
)
// StorerMock is a mock implementation of Storer.
@@ -107,6 +109,9 @@ var (
// PingFunc: func(ctx context.Context) (time.Time, error) {
// panic("TODO: mock out the Ping method")
// },
+// UpdateBuildHierarchyTaskStateFunc: func(id string, dimension string, state string) error {
+// panic("TODO: mock out the UpdateBuildHierarchyTaskState method")
+// },
// UpdateDatasetFunc: func(ID string, dataset *models.Dataset) error {
// panic("TODO: mock out the UpdateDataset method")
// },
@@ -119,6 +124,9 @@ var (
// UpdateEditionFunc: func(datasetID string, edition string, latestVersion *models.Version) error {
// panic("TODO: mock out the UpdateEdition method")
// },
+// UpdateImportObservationsTaskStateFunc: func(id string, state string) error {
+// panic("TODO: mock out the UpdateImportObservationsTaskState method")
+// },
// UpdateInstanceFunc: func(ID string, instance *models.Instance) error {
// panic("TODO: mock out the UpdateInstance method")
// },
@@ -204,6 +212,9 @@ type StorerMock struct {
// PingFunc mocks the Ping method.
PingFunc func(ctx context.Context) (time.Time, error)
+ // UpdateBuildHierarchyTaskStateFunc mocks the UpdateBuildHierarchyTaskState method.
+ UpdateBuildHierarchyTaskStateFunc func(id string, dimension string, state string) error
+
// UpdateDatasetFunc mocks the UpdateDataset method.
UpdateDatasetFunc func(ID string, dataset *models.Dataset) error
@@ -216,6 +227,9 @@ type StorerMock struct {
// UpdateEditionFunc mocks the UpdateEdition method.
UpdateEditionFunc func(datasetID string, edition string, latestVersion *models.Version) error
+ // UpdateImportObservationsTaskStateFunc mocks the UpdateImportObservationsTaskState method.
+ UpdateImportObservationsTaskStateFunc func(id string, state string) error
+
// UpdateInstanceFunc mocks the UpdateInstance method.
UpdateInstanceFunc func(ID string, instance *models.Instance) error
@@ -368,6 +382,15 @@ type StorerMock struct {
// Ctx is the ctx argument value.
Ctx context.Context
}
+ // UpdateBuildHierarchyTaskState holds details about calls to the UpdateBuildHierarchyTaskState method.
+ UpdateBuildHierarchyTaskState []struct {
+ // Id is the id argument value.
+ Id string
+ // Dimension is the dimension argument value.
+ Dimension string
+ // State is the state argument value.
+ State string
+ }
// UpdateDataset holds details about calls to the UpdateDataset method.
UpdateDataset []struct {
// ID is the ID argument value.
@@ -398,6 +421,13 @@ type StorerMock struct {
// LatestVersion is the latestVersion argument value.
LatestVersion *models.Version
}
+ // UpdateImportObservationsTaskState holds details about calls to the UpdateImportObservationsTaskState method.
+ UpdateImportObservationsTaskState []struct {
+ // Id is the id argument value.
+ Id string
+ // State is the state argument value.
+ State string
+ }
// UpdateInstance holds details about calls to the UpdateInstance method.
UpdateInstance []struct {
// ID is the ID argument value.
@@ -1108,6 +1138,45 @@ func (mock *StorerMock) PingCalls() []struct {
return calls
}
+// UpdateBuildHierarchyTaskState calls UpdateBuildHierarchyTaskStateFunc.
+func (mock *StorerMock) UpdateBuildHierarchyTaskState(id string, dimension string, state string) error {
+ if mock.UpdateBuildHierarchyTaskStateFunc == nil {
+ panic("moq: StorerMock.UpdateBuildHierarchyTaskStateFunc is nil but Storer.UpdateBuildHierarchyTaskState was just called")
+ }
+ callInfo := struct {
+ Id string
+ Dimension string
+ State string
+ }{
+ Id: id,
+ Dimension: dimension,
+ State: state,
+ }
+ lockStorerMockUpdateBuildHierarchyTaskState.Lock()
+ mock.calls.UpdateBuildHierarchyTaskState = append(mock.calls.UpdateBuildHierarchyTaskState, callInfo)
+ lockStorerMockUpdateBuildHierarchyTaskState.Unlock()
+ return mock.UpdateBuildHierarchyTaskStateFunc(id, dimension, state)
+}
+
+// UpdateBuildHierarchyTaskStateCalls gets all the calls that were made to UpdateBuildHierarchyTaskState.
+// Check the length with:
+// len(mockedStorer.UpdateBuildHierarchyTaskStateCalls())
+func (mock *StorerMock) UpdateBuildHierarchyTaskStateCalls() []struct {
+ Id string
+ Dimension string
+ State string
+} {
+ var calls []struct {
+ Id string
+ Dimension string
+ State string
+ }
+ lockStorerMockUpdateBuildHierarchyTaskState.RLock()
+ calls = mock.calls.UpdateBuildHierarchyTaskState
+ lockStorerMockUpdateBuildHierarchyTaskState.RUnlock()
+ return calls
+}
+
// UpdateDataset calls UpdateDatasetFunc.
func (mock *StorerMock) UpdateDataset(ID string, dataset *models.Dataset) error {
if mock.UpdateDatasetFunc == nil {
@@ -1252,6 +1321,41 @@ func (mock *StorerMock) UpdateEditionCalls() []struct {
return calls
}
+// UpdateImportObservationsTaskState calls UpdateImportObservationsTaskStateFunc.
+func (mock *StorerMock) UpdateImportObservationsTaskState(id string, state string) error {
+ if mock.UpdateImportObservationsTaskStateFunc == nil {
+ panic("moq: StorerMock.UpdateImportObservationsTaskStateFunc is nil but Storer.UpdateImportObservationsTaskState was just called")
+ }
+ callInfo := struct {
+ Id string
+ State string
+ }{
+ Id: id,
+ State: state,
+ }
+ lockStorerMockUpdateImportObservationsTaskState.Lock()
+ mock.calls.UpdateImportObservationsTaskState = append(mock.calls.UpdateImportObservationsTaskState, callInfo)
+ lockStorerMockUpdateImportObservationsTaskState.Unlock()
+ return mock.UpdateImportObservationsTaskStateFunc(id, state)
+}
+
+// UpdateImportObservationsTaskStateCalls gets all the calls that were made to UpdateImportObservationsTaskState.
+// Check the length with:
+// len(mockedStorer.UpdateImportObservationsTaskStateCalls())
+func (mock *StorerMock) UpdateImportObservationsTaskStateCalls() []struct {
+ Id string
+ State string
+} {
+ var calls []struct {
+ Id string
+ State string
+ }
+ lockStorerMockUpdateImportObservationsTaskState.RLock()
+ calls = mock.calls.UpdateImportObservationsTaskState
+ lockStorerMockUpdateImportObservationsTaskState.RUnlock()
+ return calls
+}
+
// UpdateInstance calls UpdateInstanceFunc.
func (mock *StorerMock) UpdateInstance(ID string, instance *models.Instance) error {
if mock.UpdateInstanceFunc == nil {
diff --git a/swagger.yaml b/swagger.yaml
index 7f3f5049..8b5bf1bf 100644
--- a/swagger.yaml
+++ b/swagger.yaml
@@ -129,6 +129,12 @@ parameters:
in: path
required: true
type: string
+ import_tasks:
+ name: import_tasks
+ description: "A request body to update the state of an import task"
+ in: body
+ schema:
+ $ref: '#/definitions/ImportTasks'
securityDefinitions:
FlorenceAPIKey:
name: florence-token
@@ -187,6 +193,7 @@ paths:
summary: "Create a dataset"
description: "Create a dataset provided by the ONS that can be filtered using the filter API"
parameters:
+ - $ref: '#/parameters/id'
- $ref: '#/parameters/new_dataset'
produces:
- "application/json"
@@ -314,7 +321,8 @@ paths:
description: "Update a version for an edition of a dataset, if the state is changed to associated or published, the parent documents(dataset and edition resources) will also be updated. A version can only be updated if the state is not published"
parameters:
- $ref: '#/parameters/id'
- - $ref: '#/parameters/new_version'
+ - $ref: '#/parameters/edition'
+ - $ref: '#/parameters/version'
responses:
201:
description: "A json object containing a version"
@@ -667,6 +675,28 @@ paths:
description: "InstanceId does not match any instances"
500:
$ref: '#/responses/InternalError'
+ /instances/{instance_id}/import_tasks:
+ put:
+ tags:
+ - "Private"
+ summary: "Update the state of an import task in an instance"
+ description: "The instance import process involves multiple tasks. This endpoint updates the state of an import task."
+ parameters:
+ - $ref: '#/parameters/instance_id'
+ - $ref: '#/parameters/import_tasks'
+ security:
+ - InternalAPIKey: []
+ responses:
+ 200:
+ description: "Updated the state of the import task"
+ 400:
+ $ref: '#/responses/InvalidRequestError'
+ 401:
+ $ref: '#/responses/UnauthorisedError'
+ 404:
+ description: "InstanceId does not match any instances"
+ 500:
+ $ref: '#/responses/InternalError'
/instances/{instance_id}/dimensions/{dimension}/options/{option}:
put:
tags:
@@ -1036,6 +1066,8 @@ definitions:
description: "The header information from a V4 file"
items:
type: string
+ import_tasks:
+ $ref: '#/definitions/ImportTasks'
links:
readOnly: true
type: object
@@ -1123,9 +1155,6 @@ definitions:
$ref: '#/definitions/State'
temporal:
$ref: '#/definitions/Temporal'
- total_inserted_observations:
- type: integer
- description: "The number of inserted observations in this instance"
total_observations:
type: integer
description: "The number of observations in this instance"
@@ -1136,6 +1165,32 @@ definitions:
type: string
readOnly: true
description: "The last time an event happened"
+ ImportTasks:
+ type: object
+ properties:
+ import_observations:
+ type: object
+ properties:
+ total_inserted_observations:
+ type: integer
+ description: "The number of inserted observations in this instance"
+ state:
+ type: string
+ description: "The state of the import observations task"
+ build_hierarchies:
+ type: array
+ items:
+ type: object
+ properties:
+ state:
+ type: string
+              description: "The state of the build hierarchy task"
+ dimension_name:
+ type: string
+ description: "The name of the dimension the hierarchy represents"
+ code_list_id:
+ type: string
+ description: "The ID of the codelist that this hierarchy represents"
Codelist:
type: object
properties:
@@ -1747,4 +1802,4 @@ definitions:
type: string
href:
description: "A URL to the version this resource relates to"
- type: string
+ type: string
\ No newline at end of file
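
For reference, a request to the new `import_tasks` endpoint might look like the sketch below. The host/port, instance ID and auth header name/value are placeholders, and the body is just one shape permitted by the `ImportTasks` definition above.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Illustrative body: update the observations task and one hierarchy-build task.
	body := []byte(`{
	  "import_observations": {"state": "completed"},
	  "build_hierarchies": [{"dimension_name": "geography", "state": "completed"}]
	}`)

	// Placeholder host and instance ID; the auth header name is an assumption.
	url := "http://localhost:22000/instances/a1b2c3d4/import_tasks"
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Internal-Token", "<internal-api-key>")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 200 when the task state was updated
}
```
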
diff --git a/vendor/github.com/ONSdigital/go-ns/avro/avro.go b/vendor/github.com/ONSdigital/go-ns/avro/avro.go
new file mode 100644
index 00000000..cc4376cd
--- /dev/null
+++ b/vendor/github.com/ONSdigital/go-ns/avro/avro.go
@@ -0,0 +1,442 @@
+// Package avro provides functionality to return the avro
+// encoding of a given struct.
+package avro
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/go-avro/avro"
+)
+
+// Schema contains the schema definition necessary to generate an avro record
+type Schema struct {
+ Definition string
+}
+
+// ErrUnsupportedType is returned if the interface isn't a
+// pointer to a struct
+func ErrUnsupportedType(typ reflect.Kind) error {
+ return fmt.Errorf("Unsupported interface type: %v", typ)
+}
+
+// ErrUnsupportedFieldType is returned for unsupported field types.
+var ErrUnsupportedFieldType = errors.New("Unsupported field type")
+
+// ErrMissingNestedScema is returned when nested schemas are missing from the parent
+var ErrMissingNestedScema = errors.New("nested schema missing from parent")
+
+// Marshal is used to avro encode the interface of s.
+func (schema *Schema) Marshal(s interface{}) ([]byte, error) {
+ v := reflect.ValueOf(s)
+
+ if v.Kind() == reflect.PtrTo(reflect.TypeOf(s)).Kind() {
+ v = reflect.Indirect(v)
+ }
+
+ // Only structs are supported so return an empty result if the passed object
+ // isn't a struct.
+ if v.Kind() != reflect.Struct {
+ return nil, ErrUnsupportedType(v.Kind())
+ }
+
+ // If a pointer to a struct is passed, get the type of the dereferenced object.
+ typ := reflect.TypeOf(s)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ }
+
+ // Check for unsupported interface types
+ err := checkFieldType(v, typ)
+ if err != nil {
+ return nil, err
+ }
+
+ avroSchema, err := avro.ParseSchema(schema.Definition)
+ if err != nil {
+ return nil, err
+ }
+
+ record, err := getRecord(avroSchema, v, typ)
+ if err != nil {
+ return nil, err
+ }
+
+ writer := avro.NewGenericDatumWriter()
+ writer.SetSchema(avroSchema)
+
+ buffer := new(bytes.Buffer)
+ encoder := avro.NewBinaryEncoder(buffer)
+
+ err = writer.Write(record, encoder)
+ if err != nil {
+ return nil, err
+ }
+
+ return buffer.Bytes(), nil
+}
+
+// Unmarshal is used to parse the avro encoded data and store the
+// result in the value pointed to by s.
+func (schema *Schema) Unmarshal(message []byte, s interface{}) error {
+ v := reflect.ValueOf(s)
+ vp := reflect.ValueOf(s)
+
+ if v.Kind() == reflect.PtrTo(reflect.TypeOf(s)).Kind() {
+ v = reflect.Indirect(v)
+ }
+
+ // Only structs are supported so return an empty result if the passed object
+ // isn't a struct.
+ if v.Kind() != reflect.Struct {
+ return ErrUnsupportedType(v.Kind())
+ }
+
+ // If a pointer to a struct is passed, get the type of the dereferenced object.
+ typ := reflect.TypeOf(s)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ }
+
+ return populateStructFromSchema(schema.Definition, message, typ, v, vp)
+}
+
+func checkFieldType(v reflect.Value, t reflect.Type) error {
+ for i := 0; i < v.NumField(); i++ {
+ fieldTag := t.Field(i).Tag.Get("avro")
+ if fieldTag == "-" {
+ continue
+ }
+ fieldType := t.Field(i)
+
+ if !isValidType(fieldType.Type.Kind()) {
+ return ErrUnsupportedFieldType
+ }
+ }
+
+ return nil
+}
+
+func generateDecodedRecord(schema string, message []byte) (*avro.GenericRecord, error) {
+ avroSchema, err := avro.ParseSchema(schema)
+ if err != nil {
+ return nil, err
+ }
+
+ reader := avro.NewGenericDatumReader()
+ reader.SetSchema(avroSchema)
+ decoder := avro.NewBinaryDecoder(message)
+ decodedRecord := avro.NewGenericRecord(avroSchema)
+
+ err = reader.Read(decodedRecord, decoder)
+ if err != nil {
+ return nil, err
+ }
+
+ return decodedRecord, nil
+}
+
+func getNestedSchema(avroSchema avro.Schema, fieldTag string, v reflect.Value, typ reflect.Type) (avro.Schema, error) {
+ // Unmarshal parent avro schema into map
+ var schemaMap map[string]interface{}
+ if err := json.Unmarshal([]byte(avroSchema.String()), &schemaMap); err != nil {
+ return nil, err
+ }
+
+ // Get fields section from parent schema map
+ fields := schemaMap["fields"].([]interface{})
+ for _, field := range fields {
+ var fld map[string]interface{}
+ var ok bool
+
+ if fld, ok = field.(map[string]interface{}); !ok {
+ continue
+ }
+
+ // Iterate through each field until the nested schema field is found
+ if fld["name"].(string) == fieldTag {
+ var avroFieldType map[string]interface{}
+
+ // The nested schema is inside the type element of the required field
+ if avroFieldType, ok = fld["type"].(map[string]interface{}); !ok {
+ var avroFieldTypes []interface{}
+ if avroFieldTypes, ok = fld["type"].([]interface{}); !ok {
+ continue
+ }
+
+ // If the nested schema could potentially be "null", then the schema is the second type
+ // element rather than the first
+ if avroFieldType = avroFieldTypes[1].(map[string]interface{}); !ok {
+ continue
+ }
+ }
+
+ // Marshal the nested schema map into json
+ nestedSchemaBytes, err := json.Marshal(avroFieldType)
+ if err != nil {
+ return nil, err
+ }
+
+ return avro.ParseSchema(string(nestedSchemaBytes))
+ }
+ }
+ return nil, ErrMissingNestedScema
+}
+
+func getRecord(avroSchema avro.Schema, v reflect.Value, typ reflect.Type) (*avro.GenericRecord, error) {
+ record := avro.NewGenericRecord(avroSchema)
+
+ for i := 0; i < v.NumField(); i++ {
+ fieldTag := typ.Field(i).Tag.Get("avro")
+ if fieldTag == "-" {
+ continue
+ }
+ fieldName := typ.Field(i).Name
+
+ switch typ.Field(i).Type.Kind() {
+ case reflect.Bool:
+ value := v.FieldByName(fieldName).Bool()
+ record.Set(fieldTag, value)
+ case reflect.String:
+ value := v.FieldByName(fieldName).String()
+ record.Set(fieldTag, value)
+ case reflect.Int32:
+ value := v.FieldByName(fieldName).Interface().(int32)
+ record.Set(fieldTag, value)
+ case reflect.Int64:
+ value := v.FieldByName(fieldName).Interface().(int64)
+ record.Set(fieldTag, value)
+ case reflect.Slice:
+ if err := marshalSlice(record, v, i, fieldTag, avroSchema); err != nil {
+ return nil, err
+ }
+ case reflect.Struct:
+ nestedSchema, err := getNestedSchema(avroSchema, fieldTag, v, typ)
+ if err != nil {
+ return nil, err
+ }
+
+ nestedRecord, err := getRecord(nestedSchema, v.Field(i), typ.Field(i).Type)
+ if err != nil {
+ return nil, err
+ }
+
+ record.Set(fieldTag, nestedRecord)
+ }
+ }
+
+ return record, nil
+}
+
+func isValidType(kind reflect.Kind) bool {
+ supportedTypes := []reflect.Kind{
+ reflect.Bool,
+ reflect.Int32,
+ reflect.Int64,
+ reflect.Slice,
+ reflect.String,
+ reflect.Struct,
+ }
+
+ for _, supportedType := range supportedTypes {
+ if supportedType == kind {
+ return true
+ }
+ }
+ return false
+}
+
+func marshalSlice(record *avro.GenericRecord, v reflect.Value, i int, fieldTag string, avroSchema avro.Schema) error {
+ // This switch statement will need to be extended to support other native types,
+ // Currently supports strings and structs.
+ switch v.Field(i).Type().Elem().Kind() {
+ case reflect.String:
+ slice := marshalStringSlice(v, i)
+ record.Set(fieldTag, slice)
+ case reflect.Struct:
+ slice, err := marshalStructSlice(v, i, avroSchema, fieldTag)
+ if err != nil {
+ return err
+ }
+ record.Set(fieldTag, slice)
+ }
+ return nil
+}
+
+func marshalStringSlice(v reflect.Value, i int) []interface{} {
+ vals := v.Field(i)
+ var slice []interface{}
+ for j := 0; j < vals.Len(); j++ {
+ slice = append(slice, vals.Index(j).Interface())
+ }
+ return slice
+}
+
+func marshalStructSlice(v reflect.Value, i int, avroSchema avro.Schema, fieldTag string) ([]interface{}, error) {
+ vals := v.Field(i)
+ var slice []interface{}
+ for j := 0; j < vals.Len(); j++ {
+ arraySchema, err := getArraySchema(avroSchema, fieldTag)
+ if err != nil {
+ return nil, err
+ }
+
+ arrayRecord, err := getRecord(arraySchema, vals.Index(j), v.Field(i).Type().Elem())
+ if err != nil {
+ return nil, err
+ }
+
+ slice = append(slice, arrayRecord)
+ }
+ return slice, nil
+}
+
+func getArraySchema(avroSchema avro.Schema, fieldTag string) (avro.Schema, error) {
+ var schemaMap map[string]interface{}
+ // Unmarshal the parent schema into a map
+ if err := json.Unmarshal([]byte(avroSchema.String()), &schemaMap); err != nil {
+ return nil, err
+ }
+
+ fields := schemaMap["fields"].([]interface{})
+ for _, field := range fields {
+ var fld map[string]interface{}
+ var ok bool
+
+ if fld, ok = field.(map[string]interface{}); !ok {
+ continue
+ }
+
+ // Iterate through fields in schema until fieldTag matches the name element
+ // of the field
+ if fld["name"].(string) == fieldTag {
+ // The array schema will be inside the type element of the requested field.
+ // Marshal this schema back to json and return
+ arraySchemaBytes, err := json.Marshal(fld["type"])
+ if err != nil {
+ return nil, err
+ }
+
+ return avro.ParseSchema(string(arraySchemaBytes))
+ }
+ }
+
+ return nil, ErrMissingNestedScema
+}
+
+func populateNestedArrayItem(nestedMap map[string]interface{}, typ reflect.Type) reflect.Value {
+ // Create a new instance of required struct type
+ v := reflect.Indirect(reflect.New(typ))
+ for i := 0; i < v.NumField(); i++ {
+ field := typ.Field(i).Tag.Get("avro")
+ fieldValue := nestedMap[field]
+ if fieldValue != nil {
+ if v.Field(i).Kind() == reflect.Struct {
+ setNestedStructs(fieldValue.(map[string]interface{}), v.Field(i), typ.Field(i).Type)
+ continue
+ }
+ value := reflect.ValueOf(fieldValue)
+ if typ.Field(i).Type.Kind() == reflect.Slice {
+ sliceInterface := fieldValue.([]interface{})
+ sliceString := make([]string, len(sliceInterface))
+ for _, val := range sliceInterface {
+ sliceString = append(sliceString, val.(string))
+ }
+ value = reflect.ValueOf(sliceString)
+ }
+ v.Field(i).Set(value)
+ }
+ }
+ return v
+}
+
+func populateStructFromSchema(schema string, message []byte, typ reflect.Type, v, vp reflect.Value) error {
+ decodedRecord, err := generateDecodedRecord(schema, message)
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < v.NumField(); i++ {
+ field := typ.Field(i).Tag.Get("avro")
+ if field == "-" {
+ continue
+ }
+ rawFieldName := typ.Field(i).Name
+ fieldName := vp.Elem().FieldByName(rawFieldName)
+
+ value := decodedRecord.Get(field)
+
+ if fieldName.IsValid() {
+ if v.Field(i).Type().Kind() == reflect.Slice {
+ switch v.Field(i).Type().Elem().Kind() {
+ case reflect.String:
+ value = unmarshalStringSlice(value)
+ case reflect.Struct:
+ v, err = unmarshalStructSlice(value, v, i)
+ if err != nil {
+ return err
+ }
+ continue
+ default:
+ return ErrUnsupportedType(v.Field(i).Type().Elem().Kind())
+ }
+ }
+ fieldName.Set(reflect.ValueOf(value))
+ }
+ }
+
+ return nil
+}
+
+func setNestedStructs(nestedMap map[string]interface{}, v reflect.Value, typ reflect.Type) {
+ for i := 0; i < v.NumField(); i++ {
+ field := typ.Field(i).Tag.Get("avro")
+ fieldValue := nestedMap[field]
+ if fieldValue != nil {
+ if v.Field(i).Kind() == reflect.Struct {
+ setNestedStructs(fieldValue.(map[string]interface{}), v.Field(i), typ.Field(i).Type)
+ continue
+ }
+ value := reflect.ValueOf(fieldValue)
+ if typ.Field(i).Type.Kind() == reflect.Slice {
+ sliceInterface := fieldValue.([]interface{})
+ sliceString := make([]string, len(sliceInterface))
+ for i, val := range sliceInterface {
+ sliceString[i] = val.(string)
+ }
+ value = reflect.ValueOf(sliceString)
+ }
+ v.Field(i).Set(value)
+ }
+ }
+}
+
+func unmarshalStringSlice(value interface{}) []string {
+ sliceInterface := value.([]interface{})
+ sliceString := make([]string, len(sliceInterface))
+ for _, val := range sliceInterface {
+ sliceString = append(sliceString, val.(string))
+ }
+ return sliceString
+}
+
+func unmarshalStructSlice(value interface{}, v reflect.Value, i int) (reflect.Value, error) {
+ sliceInterface := value.([]interface{})
+ sliceType := v.Field(i).Type().Elem()
+ emptySlice := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, 0)
+ for _, val := range sliceInterface {
+ record := val.(*avro.GenericRecord)
+
+ var dataMap map[string]interface{}
+ if err := json.Unmarshal([]byte(record.String()), &dataMap); err != nil {
+ return v, err
+ }
+ item := populateNestedArrayItem(dataMap, v.Field(i).Type().Elem())
+ emptySlice = reflect.Append(emptySlice, item)
+ v.Field(i).Set(emptySlice)
+ }
+ return v, nil
+}
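
A minimal sketch of how this vendored avro package is used: fields map to the schema through `avro` struct tags, `Schema.Marshal` accepts a struct (or pointer to one), and `Schema.Unmarshal` needs a pointer so the decoded values can be set. The event type and schema below are illustrative, not definitions from this repository.

```go
package main

import (
	"fmt"

	"github.com/ONSdigital/go-ns/avro"
)

// generateDownloads is an illustrative event; the `avro` tags map each field
// to the corresponding name in the schema definition.
type generateDownloads struct {
	InstanceID string `avro:"instance_id"`
	State      string `avro:"state"`
}

var downloadsSchema = &avro.Schema{
	Definition: `{
	  "type": "record",
	  "name": "generate-downloads",
	  "fields": [
	    {"name": "instance_id", "type": "string"},
	    {"name": "state", "type": "string"}
	  ]
	}`,
}

func main() {
	encoded, err := downloadsSchema.Marshal(generateDownloads{InstanceID: "123", State: "completed"})
	if err != nil {
		panic(err)
	}

	var decoded generateDownloads
	if err := downloadsSchema.Unmarshal(encoded, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", decoded)
}
```
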
diff --git a/vendor/github.com/ONSdigital/go-ns/avro/test_data.go b/vendor/github.com/ONSdigital/go-ns/avro/test_data.go
new file mode 100644
index 00000000..2d6c0ca4
--- /dev/null
+++ b/vendor/github.com/ONSdigital/go-ns/avro/test_data.go
@@ -0,0 +1,98 @@
+package avro
+
+var testSchema = `{ "type": "record",
+ "name": "example",
+ "fields": [
+ {"name": "manager", "type": "string"},
+ {"name": "team_name", "type": "string"},
+ {"name": "ownerOfTeam", "type": "string"},
+ {"name": "kind-of-sport", "type": "string"},
+ {"name": "uri", "type": "string", "default": ""},
+ {"name": "has_changed_name", "type": "boolean"},
+ {"name": "number_of_players", "type": "int"},
+ {"name": "pay_per_week", "type": "long"}
+ ]
+}`
+
+var testArraySchema = `{ "type": "record",
+ "name": "example",
+ "fields": [
+ {"name": "winning_years","type":["null",{"type":"array","items":"string"}]},
+ ]
+}`
+
+var testNestedArraySchema = `{
+ "type": "record",
+ "name": "example",
+ "fields": [
+ {
+ "name" : "team",
+ "type" : "string"
+ },
+ {
+ "name" : "footballers",
+ "type" : {
+ "type" : "array",
+ "items" : {
+ "name" : "footballer",
+ "type" : "record",
+ "fields" : [
+ {
+ "name" : "email",
+ "type" : "string"
+ },
+ {
+ "name": "name",
+ "type": "string"
+ }
+ ]
+ }
+ }
+ }
+ ]
+}
+`
+
+type testData struct {
+ Manager string `avro:"manager"`
+ TeamName string `avro:"team_name"`
+ Owner string `avro:"ownerOfTeam"`
+ Sport string `avro:"kind-of-sport"`
+ URI string `avro:"uri"`
+ HasChangedName bool `avro:"has_changed_name"`
+ NumberOfPlayers int32 `avro:"number_of_players"`
+ PayPerWeek int64 `avro:"pay_per_week"`
+}
+
+type testData1 struct {
+ Manager string `avro:"manager"`
+ TeamName string `avro:"team_name"`
+ Owner string `avro:"ownerOfTeam"`
+ Sport string `avro:"kind-of-sport"`
+ URI string `avro:"-"`
+ HasChangedName bool `avro:"has_changed_name"`
+ NumberOfPlayers int32 `avro:"number_of_players"`
+ PayPerWeek int64 `avro:"pay_per_week"`
+}
+
+type testData2 struct {
+ Manager string `avro:"manager"`
+ URI string `avro:"-"`
+ HasChangedName bool `avro:"has_changed_name"`
+ NumberOfPlayers int32 `avro:"number_of_players"`
+ NumberOfYouths int `avro:"number_of_youths"`
+}
+
+type testData3 struct {
+ WinningYears []string `avro:"winning_years"`
+}
+
+type testData4 struct {
+ Team string `avro:"team"`
+ Footballers []Footballer `avro:"footballers"`
+}
+
+type Footballer struct {
+ Email string `avro:"email"`
+ Name string `avro:"name"`
+}
diff --git a/vendor/github.com/ONSdigital/go-ns/kafka/README.md b/vendor/github.com/ONSdigital/go-ns/kafka/README.md
new file mode 100644
index 00000000..a0653fd1
--- /dev/null
+++ b/vendor/github.com/ONSdigital/go-ns/kafka/README.md
@@ -0,0 +1,14 @@
+Kafka wrapper
+=====
+
+Use channels to abstract kafka consumers and producers.
+
+For graceful handling of closing consumers, it is advised to call the `StopListeningToConsumer` method prior to the `Close` method. This allows in-flight messages to be completed and their offsets committed, so that messages are not replayed once the application restarts.
+
+It is recommended to use `NewSyncConsumer`: once you have read a message from `Incoming()`,
+the listener blocks (and does not read the next message from kafka)
+until you signal that the message has been consumed, typically with `CommitAndRelease(msg)`.
+Otherwise, if the application is shut down (e.g. by an interrupt signal),
+the consumer may not shut down in a timely manner, because it is blocked sending the read message to `Incoming()`.
+
+See the [example source file](../cmd/kafka-example/main.go) for a typical usage.
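
A consumer loop following the recommended `NewSyncConsumer` pattern might look like the sketch below; the broker address, topic and consumer group are placeholders.

```go
package main

import (
	"context"
	"os"
	"os/signal"
	"time"

	"github.com/ONSdigital/go-ns/kafka"
	"github.com/ONSdigital/go-ns/log"
)

func main() {
	// Placeholders: broker list, topic and consumer group.
	consumer, err := kafka.NewSyncConsumer([]string{"localhost:9092"}, "filter-job-submitted", "dp-dataset-api", kafka.OffsetNewest)
	if err != nil {
		log.ErrorC("failed to create consumer", err, nil)
		os.Exit(1)
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	for running := true; running; {
		select {
		case msg := <-consumer.Incoming():
			// ... handle msg.GetData() ...
			consumer.CommitAndRelease(msg) // commit the offset and unblock the listener
		case consumerErr := <-consumer.Errors():
			log.Error(consumerErr, nil)
		case <-signals:
			running = false
		}
	}

	// Stop reading new messages first, then close, as advised above.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := consumer.StopListeningToConsumer(ctx); err != nil {
		log.ErrorC("stop listening", err, nil)
	}
	if err := consumer.Close(ctx); err != nil {
		log.ErrorC("consumer close", err, nil)
	}
}
```
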
diff --git a/vendor/github.com/ONSdigital/go-ns/kafka/consumer-group.go b/vendor/github.com/ONSdigital/go-ns/kafka/consumer-group.go
new file mode 100644
index 00000000..12582139
--- /dev/null
+++ b/vendor/github.com/ONSdigital/go-ns/kafka/consumer-group.go
@@ -0,0 +1,209 @@
+package kafka
+
+import (
+ "context"
+ "time"
+
+ "github.com/ONSdigital/go-ns/log"
+ "github.com/bsm/sarama-cluster"
+)
+
+var tick = time.Millisecond * 1500
+
+// ConsumerGroup represents a Kafka consumer group instance.
+type ConsumerGroup struct {
+ consumer *cluster.Consumer
+ incoming chan Message
+ errors chan error
+ closer chan struct{}
+ closed chan struct{}
+ topic string
+ group string
+ sync bool
+ upstreamDone chan bool
+}
+
+// Incoming provides a channel of incoming messages.
+func (cg ConsumerGroup) Incoming() chan Message {
+ return cg.incoming
+}
+
+// Errors provides a channel of incoming errors.
+func (cg ConsumerGroup) Errors() chan error {
+ return cg.errors
+}
+
+// Release signals that upstream has completed an incoming message
+// i.e. move on to read the next message
+func (cg ConsumerGroup) Release() {
+ cg.upstreamDone <- true
+}
+
+// CommitAndRelease commits the consumed message and release the consumer listener to read another message
+func (cg ConsumerGroup) CommitAndRelease(msg Message) {
+ msg.Commit()
+ cg.Release()
+}
+
+// StopListeningToConsumer stops any more messages being consumed off kafka topic
+func (cg *ConsumerGroup) StopListeningToConsumer(ctx context.Context) (err error) {
+
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ close(cg.closer)
+
+ logData := log.Data{"topic": cg.topic, "group": cg.group}
+ select {
+ case <-cg.closed:
+ log.Info("StopListeningToConsumer got confirmation of closed kafka consumer listener", logData)
+ case <-ctx.Done():
+ err = ctx.Err()
+ log.ErrorC("StopListeningToConsumer abandoned: context done", err, logData)
+ }
+ return
+}
+
+// Close safely closes the consumer and releases all resources.
+// pass in a context with a timeout or deadline.
+// Passing a nil context will provide no timeout but is not recommended
+func (cg *ConsumerGroup) Close(ctx context.Context) (err error) {
+
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ // close(closer) - the select{} avoids panic if already closed (by StopListeningToConsumer)
+ select {
+ case <-cg.closer:
+ default:
+ close(cg.closer)
+ }
+
+ logData := log.Data{"topic": cg.topic, "group": cg.group}
+ select {
+ case <-cg.closed:
+ close(cg.errors)
+ close(cg.incoming)
+
+ if err = cg.consumer.Close(); err != nil {
+ log.ErrorC("Close failed of kafka consumer group", err, logData)
+ } else {
+ log.Info("Successfully closed kafka consumer group", logData)
+ }
+ case <-ctx.Done():
+ err = ctx.Err()
+ log.ErrorC("Close abandoned: context done", err, logData)
+ }
+ return
+}
+
+// NewSyncConsumer returns a new synchronous consumer group using default configuration.
+func NewSyncConsumer(brokers []string, topic string, group string, offset int64) (*ConsumerGroup, error) {
+ return newConsumer(brokers, topic, group, offset, true)
+}
+
+// NewConsumerGroup returns a new asynchronous consumer group using default configuration.
+func NewConsumerGroup(brokers []string, topic string, group string, offset int64) (*ConsumerGroup, error) {
+ return newConsumer(brokers, topic, group, offset, false)
+}
+
+// newConsumer returns a new consumer group using default configuration.
+func newConsumer(brokers []string, topic string, group string, offset int64, sync bool) (*ConsumerGroup, error) {
+
+ config := cluster.NewConfig()
+ config.Group.Return.Notifications = true
+ config.Consumer.Return.Errors = true
+ config.Consumer.MaxWaitTime = 50 * time.Millisecond
+ config.Consumer.Offsets.Initial = offset
+
+ logData := log.Data{"topic": topic, "group": group}
+
+ consumer, err := cluster.NewConsumer(brokers, group, []string{topic}, config)
+ if err != nil {
+ log.ErrorC("newConsumer failed", err, logData)
+ return nil, err
+ }
+
+ var upstream chan Message
+ if sync {
+ // make the upstream channel buffered, so we can send-and-wait for upstreamDone
+ upstream = make(chan Message, 1)
+ } else {
+ upstream = make(chan Message)
+ }
+
+ cg := &ConsumerGroup{
+ consumer: consumer,
+ incoming: upstream,
+ closer: make(chan struct{}),
+ closed: make(chan struct{}),
+ errors: make(chan error),
+ topic: topic,
+ group: group,
+ sync: sync,
+ upstreamDone: make(chan bool, 1),
+ }
+
+ // listener goroutine - listen to consumer.Messages() and upstream them
+ // if this blocks while upstreaming a message, we can shutdown consumer via the following goroutine
+ go func() {
+ logData := log.Data{"topic": topic, "group": group}
+
+ log.Info("Started kafka consumer listener", logData)
+ defer close(cg.closed)
+ for looping := true; looping; {
+ select {
+ case <-cg.closer:
+ looping = false
+ case msg := <-cg.consumer.Messages():
+ cg.Incoming() <- SaramaMessage{msg, cg.consumer}
+ if cg.sync {
+ // wait for msg-processed or close-consumer triggers
+ for loopingForSync := true; looping && loopingForSync; {
+ select {
+ case <-cg.upstreamDone:
+ loopingForSync = false
+ case <-cg.closer:
+ // XXX if we read closer here, this means that the release/upstreamDone blocks unless it is buffered
+ looping = false
+ }
+ }
+ }
+ }
+ }
+ cg.consumer.CommitOffsets()
+ log.Info("Closed kafka consumer listener", logData)
+ }()
+
+ // control goroutine - allows us to close consumer even if blocked while upstreaming a message (above)
+ go func() {
+ logData := log.Data{"topic": topic, "group": group}
+
+ hasBalanced := false // avoid CommitOffsets() being called before we have balanced (otherwise causes a panic)
+ for looping := true; looping; {
+ select {
+ case <-cg.closer:
+ log.Info("Closing kafka consumer controller", logData)
+ <-cg.closed
+ looping = false
+ case err := <-cg.consumer.Errors():
+ log.Error(err, nil)
+ cg.Errors() <- err
+ case <-time.After(tick):
+ if hasBalanced {
+ cg.consumer.CommitOffsets()
+ }
+ case n, more := <-cg.consumer.Notifications():
+ if more {
+ hasBalanced = true
+ log.Trace("Rebalancing group", log.Data{"topic": cg.topic, "group": cg.group, "partitions": n.Current[cg.topic]})
+ }
+ }
+ }
+ log.Info("Closed kafka consumer controller", logData)
+ }()
+
+ return cg, nil
+}
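
For contrast with the synchronous pattern, a sketch of the asynchronous variant: `NewConsumerGroup` does not block the listener between messages, so `Release`/`CommitAndRelease` is not needed to unblock it, although offsets should still be marked via `msg.Commit()`. Broker, topic and group names are placeholders.

```go
package main

import (
	"context"
	"time"

	"github.com/ONSdigital/go-ns/kafka"
	"github.com/ONSdigital/go-ns/log"
)

func main() {
	// Placeholder broker, topic and group.
	consumer, err := kafka.NewConsumerGroup([]string{"localhost:9092"}, "filter-job-submitted", "dp-dataset-api", kafka.OffsetNewest)
	if err != nil {
		log.ErrorC("failed to create consumer group", err, nil)
		return
	}

	go func() {
		// Incoming() is closed by Close(), which ends this range loop.
		for msg := range consumer.Incoming() {
			// ... handle msg.GetData() ...
			msg.Commit() // mark the offset; the controller commits marked offsets periodically
		}
	}()

	// ... run until shutdown is requested; a fixed sleep stands in for that here ...
	time.Sleep(10 * time.Second)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := consumer.Close(ctx); err != nil {
		log.ErrorC("consumer group close", err, nil)
	}
}
```
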
diff --git a/vendor/github.com/ONSdigital/go-ns/kafka/global.go b/vendor/github.com/ONSdigital/go-ns/kafka/global.go
new file mode 100644
index 00000000..956dc138
--- /dev/null
+++ b/vendor/github.com/ONSdigital/go-ns/kafka/global.go
@@ -0,0 +1,29 @@
+package kafka
+
+import (
+ "github.com/Shopify/sarama"
+)
+
+const (
+ OffsetNewest = sarama.OffsetNewest
+ OffsetOldest = sarama.OffsetOldest
+)
+
+func SetMaxMessageSize(maxSize int32) {
+ sarama.MaxRequestSize = maxSize
+ sarama.MaxResponseSize = maxSize
+}
+
+// MessageConsumer provides a generic interface for consuming []byte messages
+type MessageConsumer interface {
+ Incoming() chan Message
+ Closer() chan bool
+ Errors() chan error
+}
+
+// MessageProducer provides a generic interface for producing []byte messages
+type MessageProducer interface {
+ Output() chan []byte
+ Closer() chan bool
+ Errors() chan error
+}
diff --git a/vendor/github.com/ONSdigital/go-ns/kafka/message.go b/vendor/github.com/ONSdigital/go-ns/kafka/message.go
new file mode 100644
index 00000000..4dd2b877
--- /dev/null
+++ b/vendor/github.com/ONSdigital/go-ns/kafka/message.go
@@ -0,0 +1,40 @@
+package kafka
+
+import (
+ "github.com/Shopify/sarama"
+ "github.com/bsm/sarama-cluster"
+)
+
+// Message represents a single kafka message.
+type Message interface {
+
+ // GetData returns the message contents.
+ GetData() []byte
+
+ // Commit the message's offset.
+ Commit()
+
+ // Offset returns the message offset
+ Offset() int64
+}
+
+// SaramaMessage represents a Sarama specific Kafka message
+type SaramaMessage struct {
+ message *sarama.ConsumerMessage
+ consumer *cluster.Consumer
+}
+
+// GetData returns the message contents.
+func (M SaramaMessage) GetData() []byte {
+ return M.message.Value
+}
+
+// Offset returns the message offset
+func (M SaramaMessage) Offset() int64 {
+ return M.message.Offset
+}
+
+// Commit the message's offset.
+func (M SaramaMessage) Commit() {
+ M.consumer.MarkOffset(M.message, "metadata")
+}
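
Because callers depend only on the `Message` interface, unit tests can substitute an in-memory implementation; the `fakeMessage` below is a hypothetical test double, not part of the library.

```go
package main

import (
	"fmt"

	"github.com/ONSdigital/go-ns/kafka"
)

// fakeMessage is a hypothetical in-memory kafka.Message for unit tests.
type fakeMessage struct {
	data      []byte
	offset    int64
	committed bool
}

func (m *fakeMessage) GetData() []byte { return m.data }
func (m *fakeMessage) Offset() int64   { return m.offset }
func (m *fakeMessage) Commit()         { m.committed = true }

// Compile-time check that fakeMessage satisfies the interface.
var _ kafka.Message = (*fakeMessage)(nil)

func main() {
	msg := &fakeMessage{data: []byte(`{"instance_id":"123"}`), offset: 42}
	fmt.Printf("offset=%d data=%s\n", msg.Offset(), msg.GetData())
	msg.Commit()
	fmt.Println("committed:", msg.committed)
}
```
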
diff --git a/vendor/github.com/ONSdigital/go-ns/kafka/producer.go b/vendor/github.com/ONSdigital/go-ns/kafka/producer.go
new file mode 100644
index 00000000..30df0c86
--- /dev/null
+++ b/vendor/github.com/ONSdigital/go-ns/kafka/producer.go
@@ -0,0 +1,92 @@
+package kafka
+
+import (
+ "context"
+ "errors"
+
+ "github.com/ONSdigital/go-ns/log"
+ "github.com/Shopify/sarama"
+)
+
+// ErrShutdownTimedOut represents an error received due to the context
+// deadline being exceeded
+var ErrShutdownTimedOut = errors.New("Shutdown context timed out")
+
+// Producer provides a producer of Kafka messages
+type Producer struct {
+ producer sarama.AsyncProducer
+ output chan []byte
+ errors chan error
+ closer chan struct{}
+ closed chan struct{}
+}
+
+// Output is the channel to send outgoing messages to.
+func (producer Producer) Output() chan []byte {
+ return producer.output
+}
+
+// Errors provides errors returned from Kafka.
+func (producer Producer) Errors() chan error {
+ return producer.errors
+}
+
+// Close safely closes the consumer and releases all resources.
+// pass in a context with a timeout or deadline.
+// Passing a nil context will provide no timeout but is not recommended
+func (producer *Producer) Close(ctx context.Context) (err error) {
+
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ close(producer.closer)
+
+ select {
+ case <-producer.closed:
+ close(producer.errors)
+ close(producer.output)
+ log.Info("Successfully closed kafka producer", nil)
+ return producer.producer.Close()
+
+ case <-ctx.Done():
+ log.Info("Shutdown context time exceeded, skipping graceful shutdown of consumer group", nil)
+ return ErrShutdownTimedOut
+ }
+}
+
+// NewProducer returns a new producer instance using the provided config. The rest of the config is set to defaults.
+func NewProducer(brokers []string, topic string, envMax int) (Producer, error) {
+ config := sarama.NewConfig()
+ if envMax > 0 {
+ config.Producer.MaxMessageBytes = envMax
+ }
+ producer, err := sarama.NewAsyncProducer(brokers, config)
+ if err != nil {
+ return Producer{}, err
+ }
+
+ outputChannel := make(chan []byte)
+ errorChannel := make(chan error)
+ closerChannel := make(chan struct{})
+ closedChannel := make(chan struct{})
+
+ go func() {
+ defer close(closedChannel)
+ log.Info("Started kafka producer", log.Data{"topic": topic})
+ for {
+ select {
+ case err := <-producer.Errors():
+ log.ErrorC("Producer", err, log.Data{"topic": topic})
+ errorChannel <- err
+ case message := <-outputChannel:
+ producer.Input() <- &sarama.ProducerMessage{Topic: topic, Value: sarama.StringEncoder(message)}
+ case <-closerChannel:
+ log.Info("Closing kafka producer", log.Data{"topic": topic})
+ return
+ }
+ }
+ }()
+
+ return Producer{producer, outputChannel, errorChannel, closerChannel, closedChannel}, nil
+}
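
A minimal sketch of producing messages with this wrapper; the broker address and topic are placeholders, and passing `0` for `envMax` keeps sarama's default maximum message size.

```go
package main

import (
	"context"
	"os"
	"time"

	"github.com/ONSdigital/go-ns/kafka"
	"github.com/ONSdigital/go-ns/log"
)

func main() {
	// Placeholder broker and topic; 0 keeps sarama's default max message size.
	producer, err := kafka.NewProducer([]string{"localhost:9092"}, "filter-job-submitted", 0)
	if err != nil {
		log.ErrorC("failed to create producer", err, nil)
		os.Exit(1)
	}

	// Errors must be drained or the internal producer goroutine will block.
	go func() {
		for producerErr := range producer.Errors() {
			log.Error(producerErr, nil)
		}
	}()

	// Messages are raw bytes; avro-encoded payloads would be written the same way.
	producer.Output() <- []byte(`{"instance_id":"123"}`)

	// Give the message time to be dispatched; a real service keeps the producer open for its lifetime.
	time.Sleep(time.Second)

	// Close with a deadline so shutdown cannot hang indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := producer.Close(ctx); err != nil {
		log.ErrorC("producer close", err, nil)
	}
}
```
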
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
new file mode 100644
index 00000000..0a0082df
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -0,0 +1,389 @@
+# Changelog
+
+#### Version 1.12.0 (2017-05-08)
+
+New Features:
+ - Added support for the `ApiVersions` request and response pair, and Kafka
+ version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
+ that you still need to specify the Kafka version in the Sarama configuration
+ for the time being.
+ - Added a `Brokers` method to the Client which returns the complete set of
+ active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
+ - Added an `InSyncReplicas` method to the Client which returns the set of all
+ in-sync broker IDs for the given partition, now that the Kafka versions for
+ which this was misleading are no longer in our supported set
+ ([#872](https://github.com/Shopify/sarama/pull/872)).
+ - Added a `NewCustomHashPartitioner` method which allows constructing a hash
+ partitioner with a custom hash method in case the default (FNV-1a) is not
+ suitable
+ ([#837](https://github.com/Shopify/sarama/pull/837),
+ [#841](https://github.com/Shopify/sarama/pull/841)).
+
+Improvements:
+ - Recognize more Kafka error codes
+ ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+Bug Fixes:
+ - Fix an issue where decoding a malformed FetchRequest would not return the
+ correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
+ - Respect ordering of group protocols in JoinGroupRequests. This fix is
+ transparent if you're using the `AddGroupProtocol` or
+ `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+ the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
+ ([#812](https://github.com/Shopify/sarama/issues/812)).
+ - Fix an alignment-related issue with atomics on 32-bit architectures
+ ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+#### Version 1.11.0 (2016-12-20)
+
+_Important:_ As of Sarama 1.11 it is necessary to set the config value of
+`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
+versions would silently override this value when instantiating a SyncProducer
+which led to unexpected values and data races.
+
+New Features:
+ - Metrics! Thanks to Sébastien Launay for all his work on this feature
+ ([#701](https://github.com/Shopify/sarama/pull/701),
+ [#746](https://github.com/Shopify/sarama/pull/746),
+ [#766](https://github.com/Shopify/sarama/pull/766)).
+ - Add support for LZ4 compression
+ ([#786](https://github.com/Shopify/sarama/pull/786)).
+ - Add support for ListOffsetRequest v1 and Kafka 0.10.1
+ ([#775](https://github.com/Shopify/sarama/pull/775)).
+ - Added a `HighWaterMarks` method to the Consumer which aggregates the
+ `HighWaterMarkOffset` values of its child topic/partitions
+ ([#769](https://github.com/Shopify/sarama/pull/769)).
+
+Bug Fixes:
+ - Fixed producing when using timestamps, compression and Kafka 0.10
+ ([#759](https://github.com/Shopify/sarama/pull/759)).
+ - Added missing decoder methods to DescribeGroups response
+ ([#756](https://github.com/Shopify/sarama/pull/756)).
+ - Fix producer shutdown when `Return.Errors` is disabled
+ ([#787](https://github.com/Shopify/sarama/pull/787)).
+ - Don't mutate configuration in SyncProducer
+ ([#790](https://github.com/Shopify/sarama/pull/790)).
+ - Fix crash on SASL initialization failure
+ ([#795](https://github.com/Shopify/sarama/pull/795)).
+
+#### Version 1.10.1 (2016-08-30)
+
+Bug Fixes:
+ - Fix the documentation for `HashPartitioner` which was incorrect
+ ([#717](https://github.com/Shopify/sarama/pull/717)).
+ - Permit client creation even when it is limited by ACLs
+ ([#722](https://github.com/Shopify/sarama/pull/722)).
+ - Several fixes to the consumer timer optimization code, regressions introduced
+ in v1.10.0. Go's timers are finicky
+ ([#730](https://github.com/Shopify/sarama/pull/730),
+ [#733](https://github.com/Shopify/sarama/pull/733),
+ [#734](https://github.com/Shopify/sarama/pull/734)).
+ - Handle consuming compressed relative offsets with Kafka 0.10
+ ([#735](https://github.com/Shopify/sarama/pull/735)).
+
+#### Version 1.10.0 (2016-08-02)
+
+_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
+Kafka you are running against (via the `config.Version` value) in order to use
+features that may not be compatible with old Kafka versions. If you don't
+specify this value it will default to 0.8.2 (the minimum supported), and trying
+to use more recent features (like the offset manager) will fail with an error.
+
+_Also:_ The offset-manager's behaviour has been changed to match the upstream
+java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
+[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
+offset-manager, please ensure that you are committing one *greater* than the
+last consumed message offset or else you may end up consuming duplicate
+messages.
+
+New Features:
+ - Support for Kafka 0.10
+ ([#672](https://github.com/Shopify/sarama/pull/672),
+ [#678](https://github.com/Shopify/sarama/pull/678),
+ [#681](https://github.com/Shopify/sarama/pull/681), and others).
+ - Support for configuring the target Kafka version
+ ([#676](https://github.com/Shopify/sarama/pull/676)).
+ - Batch producing support in the SyncProducer
+ ([#677](https://github.com/Shopify/sarama/pull/677)).
+ - Extend producer mock to allow setting expectations on message contents
+ ([#667](https://github.com/Shopify/sarama/pull/667)).
+
+Improvements:
+ - Support `nil` compressed messages for deleting in compacted topics
+ ([#634](https://github.com/Shopify/sarama/pull/634)).
+ - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
+ misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
+ - Re-use consumer expiry timers, removing one allocation per consumed message
+ ([#707](https://github.com/Shopify/sarama/pull/707)).
+
+Bug Fixes:
+ - Actually default the client ID to "sarama" like we say we do
+ ([#664](https://github.com/Shopify/sarama/pull/664)).
+ - Fix a rare issue where `Client.Leader` could return the wrong error
+ ([#685](https://github.com/Shopify/sarama/pull/685)).
+ - Fix a possible tight loop in the consumer
+ ([#693](https://github.com/Shopify/sarama/pull/693)).
+ - Match upstream's offset-tracking behaviour
+ ([#705](https://github.com/Shopify/sarama/pull/705)).
+ - Report UnknownTopicOrPartition errors from the offset manager
+ ([#706](https://github.com/Shopify/sarama/pull/706)).
+ - Fix possible negative partition value from the HashPartitioner
+ ([#709](https://github.com/Shopify/sarama/pull/709)).
+
+#### Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+ ([#602](https://github.com/Shopify/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+ implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
+ - Declare support for Golang 1.6
+ ([#611](https://github.com/Shopify/sarama/pull/611)).
+ - Support for SASL plain-text auth
+ ([#648](https://github.com/Shopify/sarama/pull/648)).
+
+Improvements:
+ - Simplified broker locking scheme slightly
+ ([#604](https://github.com/Shopify/sarama/pull/604)).
+ - Documentation cleanup
+ ([#605](https://github.com/Shopify/sarama/pull/605),
+ [#621](https://github.com/Shopify/sarama/pull/621),
+ [#654](https://github.com/Shopify/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+ ([#658](https://github.com/Shopify/sarama/pull/658)).
+
+#### Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+ - All protocol messages and fields
+ ([#586](https://github.com/Shopify/sarama/pull/586),
+ [#588](https://github.com/Shopify/sarama/pull/588),
+ [#590](https://github.com/Shopify/sarama/pull/590)).
+ - Verified that TLS support works
+ ([#581](https://github.com/Shopify/sarama/pull/581)).
+ - Fixed the OffsetManager compatibility
+ ([#585](https://github.com/Shopify/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+ ([#584](https://github.com/Shopify/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+ ([#589](https://github.com/Shopify/sarama/pull/589)).
+
+#### Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+ ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
+ caveats:
+ - Protocol-layer support is mostly in place
+ ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
+    renamed some messages and fields, which we did not rename, in order to preserve API
+ compatibility.
+ - The producer and consumer work against 0.9, but the offset manager does
+ not ([#573](https://github.com/Shopify/sarama/pull/573)).
+ - TLS support may or may not work
+ ([#581](https://github.com/Shopify/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+ when the TCP connection is left hanging
+ ([#548](https://github.com/Shopify/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+ solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
+ slightly more efficient, and much more precise in calculating batch sizes
+ when compression is used
+ ([#549](https://github.com/Shopify/sarama/pull/549),
+ [#550](https://github.com/Shopify/sarama/pull/550),
+ [#551](https://github.com/Shopify/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+ ([#553](https://github.com/Shopify/sarama/pull/553)).
+
+#### Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+ ([#449](https://github.com/Shopify/sarama/pull/449)).
+
+#### Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+ Kafka 0.8.2. The API is designed mainly for integration into a future
+ high-level consumer, not for direct use, although it is *possible* to use it
+ directly.
+ ([#461](https://github.com/Shopify/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+ removing a major hotspot from most profiles
+ ([#255](https://github.com/Shopify/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+ by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
+ [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
+ ([#528](https://github.com/Shopify/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+ ([#529](https://github.com/Shopify/sarama/pull/529)).
+
+#### Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+ to change when Kafka releases built-in TLS support, but for now this is
+ enough to work with TLS-terminating proxies
+ ([#154](https://github.com/Shopify/sarama/pull/154)).
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+ all other partitions will continue to consume normally
+ ([#485](https://github.com/Shopify/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+ ([#495](https://github.com/Shopify/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+ future work ([#300](https://github.com/Shopify/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+ ([#475](https://github.com/Shopify/sarama/pull/475)).
+
+#### Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+ circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
+ - Don't retry messages until the broker is closed when abandoning a broker in
+ the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
+ - Update the import path for snappy-go, it has moved again and the API has
+ changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
+
+#### Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+ - Update the import path for snappy-go, it has moved from google code to github
+ ([#456](https://github.com/Shopify/sarama/pull/456)).
+
+#### Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+ ([#446](https://github.com/Shopify/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+ ([#450](https://github.com/Shopify/sarama/pull/450),
+ [#451](https://github.com/Shopify/sarama/pull/451)).
+
+#### Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+ users to dynamically choose what topics/partitions to consume without
+ instantiating a full client
+ ([#431](https://github.com/Shopify/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+ by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+ partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+ ([#439](https://github.com/Shopify/sarama/pull/439),
+ [#442](https://github.com/Shopify/sarama/pull/442)).
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+ useful, and slightly less verbose
+ ([#429](https://github.com/Shopify/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+ thundering herd on the first broker in the list
+ ([#441](https://github.com/Shopify/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+ shutting down, fixing several instances of confusing behaviour and at least
+ one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+ making it much more resilient to specific user code ordering
+ ([#325](https://github.com/Shopify/sarama/pull/325)).
+
+#### Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+ ConsumerMetadataRequests similar to how it tracks partition leadership using
+ regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
+ This adds two methods to the client API:
+ - `Coordinator(consumerGroup string) (*Broker, error)`
+ - `RefreshCoordinator(consumerGroup string) error`
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+ ID/address/port combination for the Coordinator; accessing the fields
+ individually has been deprecated
+ ([#413](https://github.com/Shopify/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+ Consumers will fail to start if the provided offset is out of range
+ ([#418](https://github.com/Shopify/sarama/pull/418))
+ and they will automatically shut down if the offset falls out of range
+ ([#424](https://github.com/Shopify/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+ ([#427](https://github.com/Shopify/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+ it happens to be activated while the client is being closed
+ ([#422](https://github.com/Shopify/sarama/pull/422)).
+
+#### Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+ ([#389](https://github.com/Shopify/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+ messages due to an improved queue implementation
+ ([#396](https://github.com/Shopify/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+ changes ([#385](https://github.com/Shopify/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+ retry once in the event of stale information or similar
+ ([#394](https://github.com/Shopify/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+ ([#407](https://github.com/Shopify/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+ API versions ([#390](https://github.com/Shopify/sarama/pull/390),
+ [#400](https://github.com/Shopify/sarama/pull/400)).
+
+#### Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+ broken topics don't choke throughput
+ ([#373](https://github.com/Shopify/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+ ([#367](https://github.com/Shopify/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+ ([#369](https://github.com/Shopify/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+ gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
+
+
+#### Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
new file mode 100644
index 00000000..8121b63b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
new file mode 100644
index 00000000..626b09a5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -0,0 +1,21 @@
+default: fmt vet errcheck test
+
+test:
+ go test -v -timeout 60s -race ./...
+
+vet:
+ go vet ./...
+
+errcheck:
+ errcheck github.com/Shopify/sarama/...
+
+fmt:
+ @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
+
+install_dependencies: install_errcheck get
+
+install_errcheck:
+ go get github.com/kisielk/errcheck
+
+get:
+ go get -t
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
new file mode 100644
index 00000000..6e12a07a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -0,0 +1,38 @@
+sarama
+======
+
+[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
+[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
+
+### Getting started
+
+- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
+
+### Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two month
+grace period for older releases. This means we currently officially support
+Go 1.8 and 1.7, and Kafka 0.10 and 0.9, although older releases are
+still likely to work.
+
+Sarama follows semantic versioning and provides API stability via the gopkg.in service.
+You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
+A changelog is available [here](CHANGELOG.md).
+
+### Contributing
+
+* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
+* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
+ technical and design details.
+* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
+ contains a wealth of useful information.
+* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+* If you have any questions, just ask!
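
As a quick orientation, a minimal `SyncProducer` sketch with a placeholder broker address and topic; note that `Producer.Return.Successes` must be set to true for the SyncProducer, as mentioned in the changelog above.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required for the SyncProducer

	// Broker address and topic are placeholders.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "filter-job-submitted",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}
```
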
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
new file mode 100644
index 00000000..f4b848a3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Vagrantfile
@@ -0,0 +1,20 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
+MEMORY = 3072
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.vm.box = "ubuntu/trusty64"
+
+ config.vm.provision :shell, path: "vagrant/provision.sh"
+
+ config.vm.network "private_network", ip: "192.168.100.67"
+
+ config.vm.provider "virtualbox" do |v|
+ v.memory = MEMORY
+ end
+end
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
new file mode 100644
index 00000000..ab65f01c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ApiVersionsRequest struct {
+}
+
+func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
+ return nil
+}
+
+func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+ return nil
+}
+
+func (r *ApiVersionsRequest) key() int16 {
+ return 18
+}
+
+func (r *ApiVersionsRequest) version() int16 {
+ return 0
+}
+
+func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
new file mode 100644
index 00000000..23bc326e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+type ApiVersionsResponseBlock struct {
+ ApiKey int16
+ MinVersion int16
+ MaxVersion int16
+}
+
+func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
+ pe.putInt16(b.ApiKey)
+ pe.putInt16(b.MinVersion)
+ pe.putInt16(b.MaxVersion)
+ return nil
+}
+
+func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
+ var err error
+
+ if b.ApiKey, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.MinVersion, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.MaxVersion, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type ApiVersionsResponse struct {
+ Err KError
+ ApiVersions []*ApiVersionsResponseBlock
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
+ return err
+ }
+ for _, apiVersion := range r.ApiVersions {
+ if err := apiVersion.encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
+ for i := 0; i < numBlocks; i++ {
+ block := new(ApiVersionsResponseBlock)
+ if err := block.decode(pd); err != nil {
+ return err
+ }
+ r.ApiVersions[i] = block
+ }
+
+ return nil
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+ return 18
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+ return 0
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
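
The two files above only add the wire format for Kafka's ApiVersions call (API key 18, Kafka 0.10.0+). As a minimal, hypothetical sketch of how they fit together, the `Broker` type introduced later in this patch exposes an `ApiVersions` method that sends the request and decodes the response; the broker address below is an assumption for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// The ApiVersions API requires Kafka >= 0.10.0, so say so in the config.
	conf := sarama.NewConfig()
	conf.Version = sarama.V0_10_0_0

	// Assumed broker address, for illustration only.
	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := broker.Close(); err != nil {
			log.Println("close:", err)
		}
	}()

	// Ask the broker which API keys it supports and in which version ranges.
	res, err := broker.ApiVersions(&sarama.ApiVersionsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, block := range res.ApiVersions {
		fmt.Printf("api key %d: versions %d to %d\n", block.ApiKey, block.MinVersion, block.MaxVersion)
	}
}
```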
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
new file mode 100644
index 00000000..6d71a6d8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -0,0 +1,904 @@
+package sarama
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/eapache/go-resiliency/breaker"
+ "github.com/eapache/queue"
+)
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks: it will not be garbage-collected automatically when it passes out of
+// scope.
+type AsyncProducer interface {
+
+ // AsyncClose triggers a shutdown of the producer. The shutdown has completed
+ // when both the Errors and Successes channels have been closed. When calling
+ // AsyncClose, you *must* continue to read from those channels in order to
+ // drain the results of any messages in flight.
+ AsyncClose()
+
+ // Close shuts down the producer and waits for any buffered messages to be
+ // flushed. You must call this function before a producer object passes out of
+ // scope, as it may otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+
+	// Input is the channel on which the user writes the messages that they
+	// wish to send.
+ Input() chan<- *ProducerMessage
+
+ // Successes is the success output channel back to the user when Return.Successes is
+ // enabled. If Return.Successes is true, you MUST read from this channel or the
+ // Producer will deadlock. It is suggested that you send and read messages
+ // together in a single select statement.
+ Successes() <-chan *ProducerMessage
+
+ // Errors is the error output channel back to the user. You MUST read from this
+ // channel or the Producer will deadlock when the channel is full. Alternatively,
+	// you can set Producer.Return.Errors in your config to false, which prevents
+	// errors from being returned.
+ Errors() <-chan *ProducerError
+}
+
+type asyncProducer struct {
+ client Client
+ conf *Config
+ ownClient bool
+
+ errors chan *ProducerError
+ input, successes, retries chan *ProducerMessage
+ inFlight sync.WaitGroup
+
+ brokers map[*Broker]chan<- *ProducerMessage
+ brokerRefs map[chan<- *ProducerMessage]int
+ brokerLock sync.Mutex
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+ client, err := NewClient(addrs, conf)
+ if err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ p.(*asyncProducer).ownClient = true
+ return p, nil
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ p := &asyncProducer{
+ client: client,
+ conf: client.Config(),
+ errors: make(chan *ProducerError),
+ input: make(chan *ProducerMessage),
+ successes: make(chan *ProducerMessage),
+ retries: make(chan *ProducerMessage),
+ brokers: make(map[*Broker]chan<- *ProducerMessage),
+ brokerRefs: make(map[chan<- *ProducerMessage]int),
+ }
+
+ // launch our singleton dispatchers
+ go withRecover(p.dispatcher)
+ go withRecover(p.retryHandler)
+
+ return p, nil
+}
+
+type flagSet int8
+
+const (
+ syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+ fin // final message from partitionProducer to brokerProducer and back
+ shutdown // start the shutdown process
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+ Topic string // The Kafka topic for this message.
+ // The partitioning key for this message. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Key Encoder
+ // The actual message to store in Kafka. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Value Encoder
+
+ // This field is used to hold arbitrary data you wish to include so it
+ // will be available when receiving on the Successes and Errors channels.
+	// Sarama completely ignores this field; it is only intended for
+	// pass-through data.
+ Metadata interface{}
+
+	// The fields below this point are filled in by the producer as the message is processed
+
+ // Offset is the offset of the message stored on the broker. This is only
+ // guaranteed to be defined if the message was successfully delivered and
+ // RequiredAcks is not NoResponse.
+ Offset int64
+ // Partition is the partition that the message was sent to. This is only
+ // guaranteed to be defined if the message was successfully delivered.
+ Partition int32
+ // Timestamp is the timestamp assigned to the message by the broker. This
+ // is only guaranteed to be defined if the message was successfully
+ // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
+ // least version 0.10.0.
+ Timestamp time.Time
+
+ retries int
+ flags flagSet
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) byteSize() int {
+ size := producerMessageOverhead
+ if m.Key != nil {
+ size += m.Key.Length()
+ }
+ if m.Value != nil {
+ size += m.Value.Length()
+ }
+ return size
+}
+
+func (m *ProducerMessage) clear() {
+ m.flags = 0
+ m.retries = 0
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+ Msg *ProducerMessage
+ Err error
+}
+
+func (pe ProducerError) Error() string {
+ return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+ return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+ return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+ return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+ return p.input
+}
+
+func (p *asyncProducer) Close() error {
+ p.AsyncClose()
+
+ if p.conf.Producer.Return.Successes {
+ go withRecover(func() {
+ for range p.successes {
+ }
+ })
+ }
+
+ var errors ProducerErrors
+ if p.conf.Producer.Return.Errors {
+ for event := range p.errors {
+ errors = append(errors, event)
+ }
+ } else {
+ <-p.errors
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+ go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+ handlers := make(map[string]chan<- *ProducerMessage)
+ shuttingDown := false
+
+ for msg := range p.input {
+ if msg == nil {
+ Logger.Println("Something tried to send a nil message, it was ignored.")
+ continue
+ }
+
+ if msg.flags&shutdown != 0 {
+ shuttingDown = true
+ p.inFlight.Done()
+ continue
+ } else if msg.retries == 0 {
+ if shuttingDown {
+ // we can't just call returnError here because that decrements the wait group,
+ // which hasn't been incremented yet for this message, and shouldn't be
+ pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ continue
+ }
+ p.inFlight.Add(1)
+ }
+
+ if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
+ p.returnError(msg, ErrMessageSizeTooLarge)
+ continue
+ }
+
+ handler := handlers[msg.Topic]
+ if handler == nil {
+ handler = p.newTopicProducer(msg.Topic)
+ handlers[msg.Topic] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range handlers {
+ close(handler)
+ }
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+ parent *asyncProducer
+ topic string
+ input <-chan *ProducerMessage
+
+ breaker *breaker.Breaker
+ handlers map[int32]chan<- *ProducerMessage
+ partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ tp := &topicProducer{
+ parent: p,
+ topic: topic,
+ input: input,
+ breaker: breaker.New(3, 1, 10*time.Second),
+ handlers: make(map[int32]chan<- *ProducerMessage),
+ partitioner: p.conf.Producer.Partitioner(topic),
+ }
+ go withRecover(tp.dispatch)
+ return input
+}
+
+func (tp *topicProducer) dispatch() {
+ for msg := range tp.input {
+ if msg.retries == 0 {
+ if err := tp.partitionMessage(msg); err != nil {
+ tp.parent.returnError(msg, err)
+ continue
+ }
+ }
+
+ handler := tp.handlers[msg.Partition]
+ if handler == nil {
+ handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+ tp.handlers[msg.Partition] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range tp.handlers {
+ close(handler)
+ }
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+ var partitions []int32
+
+ err := tp.breaker.Run(func() (err error) {
+ if tp.partitioner.RequiresConsistency() {
+ partitions, err = tp.parent.client.Partitions(msg.Topic)
+ } else {
+ partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+ }
+ return
+ })
+
+ if err != nil {
+ return err
+ }
+
+ numPartitions := int32(len(partitions))
+
+ if numPartitions == 0 {
+ return ErrLeaderNotAvailable
+ }
+
+ choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+ if err != nil {
+ return err
+ } else if choice < 0 || choice >= numPartitions {
+ return ErrInvalidPartition
+ }
+
+ msg.Partition = partitions[choice]
+
+ return nil
+}
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+ parent *asyncProducer
+ topic string
+ partition int32
+ input <-chan *ProducerMessage
+
+ leader *Broker
+ breaker *breaker.Breaker
+ output chan<- *ProducerMessage
+
+ // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
+ // all other messages get buffered in retryState[msg.retries].buf to preserve ordering
+ // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+ // therefore whether our buffer is complete and safe to flush)
+ highWatermark int
+ retryState []partitionRetryState
+}
+
+type partitionRetryState struct {
+ buf []*ProducerMessage
+ expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ pp := &partitionProducer{
+ parent: p,
+ topic: topic,
+ partition: partition,
+ input: input,
+
+ breaker: breaker.New(3, 1, 10*time.Second),
+ retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+ }
+ go withRecover(pp.dispatch)
+ return input
+}
+
+func (pp *partitionProducer) dispatch() {
+ // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+ // on the first message
+ pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+ if pp.leader != nil {
+ pp.output = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+ }
+
+ for msg := range pp.input {
+ if msg.retries > pp.highWatermark {
+ // a new, higher, retry level; handle it and then back off
+ pp.newHighWatermark(msg.retries)
+ time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+ } else if pp.highWatermark > 0 {
+ // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+ if msg.retries < pp.highWatermark {
+				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+ if msg.flags&fin == fin {
+ pp.retryState[msg.retries].expectChaser = false
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ } else {
+ pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+ }
+ continue
+ } else if msg.flags&fin == fin {
+ // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+ // meaning this retry level is done and we can go down (at least) one level and flush that
+ pp.retryState[pp.highWatermark].expectChaser = false
+ pp.flushRetryBuffers()
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ continue
+ }
+ }
+
+ // if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+ // without breaking any of our ordering guarantees
+
+ if pp.output == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnError(msg, err)
+ time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+ continue
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ pp.output <- msg
+ }
+
+ if pp.output != nil {
+ pp.parent.unrefBrokerProducer(pp.leader, pp.output)
+ }
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+ pp.highWatermark = hwm
+
+ // send off a fin so that we know when everything "in between" has made it
+ // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+ pp.retryState[pp.highWatermark].expectChaser = true
+ pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+ // a new HWM means that our current broker selection is out of date
+ Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ pp.parent.unrefBrokerProducer(pp.leader, pp.output)
+ pp.output = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+ Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ for {
+ pp.highWatermark--
+
+ if pp.output == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+ goto flushDone
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ for _, msg := range pp.retryState[pp.highWatermark].buf {
+ pp.output <- msg
+ }
+
+ flushDone:
+ pp.retryState[pp.highWatermark].buf = nil
+ if pp.retryState[pp.highWatermark].expectChaser {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ break
+ } else if pp.highWatermark == 0 {
+ Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+ break
+ }
+ }
+}
+
+func (pp *partitionProducer) updateLeader() error {
+ return pp.breaker.Run(func() (err error) {
+ if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+ return err
+ }
+
+ if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+ return err
+ }
+
+ pp.output = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+ return nil
+ })
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
+ var (
+ input = make(chan *ProducerMessage)
+ bridge = make(chan *produceSet)
+ responses = make(chan *brokerProducerResponse)
+ )
+
+ bp := &brokerProducer{
+ parent: p,
+ broker: broker,
+ input: input,
+ output: bridge,
+ responses: responses,
+ buffer: newProduceSet(p),
+ currentRetries: make(map[string]map[int32]error),
+ }
+ go withRecover(bp.run)
+
+ // minimal bridge to make the network response `select`able
+ go withRecover(func() {
+ for set := range bridge {
+ request := set.buildRequest()
+
+ response, err := broker.Produce(request)
+
+ responses <- &brokerProducerResponse{
+ set: set,
+ err: err,
+ res: response,
+ }
+ }
+ close(responses)
+ })
+
+ return input
+}
+
+type brokerProducerResponse struct {
+ set *produceSet
+ err error
+ res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+ parent *asyncProducer
+ broker *Broker
+
+ input <-chan *ProducerMessage
+ output chan<- *produceSet
+ responses <-chan *brokerProducerResponse
+
+ buffer *produceSet
+ timer <-chan time.Time
+ timerFired bool
+
+ closing error
+ currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+ var output chan<- *produceSet
+ Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+ for {
+ select {
+ case msg := <-bp.input:
+ if msg == nil {
+ bp.shutdown()
+ return
+ }
+
+ if msg.flags&syn == syn {
+ Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ if bp.currentRetries[msg.Topic] == nil {
+ bp.currentRetries[msg.Topic] = make(map[int32]error)
+ }
+ bp.currentRetries[msg.Topic][msg.Partition] = nil
+ bp.parent.inFlight.Done()
+ continue
+ }
+
+ if reason := bp.needsRetry(msg); reason != nil {
+ bp.parent.retryMessage(msg, reason)
+
+ if bp.closing == nil && msg.flags&fin == fin {
+ // we were retrying this partition but we can start processing again
+ delete(bp.currentRetries[msg.Topic], msg.Partition)
+ Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ }
+
+ continue
+ }
+
+ if bp.buffer.wouldOverflow(msg) {
+ if err := bp.waitForSpace(msg); err != nil {
+ bp.parent.retryMessage(msg, err)
+ continue
+ }
+ }
+
+ if err := bp.buffer.add(msg); err != nil {
+ bp.parent.returnError(msg, err)
+ continue
+ }
+
+ if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+ bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
+ }
+ case <-bp.timer:
+ bp.timerFired = true
+ case output <- bp.buffer:
+ bp.rollOver()
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ }
+
+ if bp.timerFired || bp.buffer.readyToFlush() {
+ output = bp.output
+ } else {
+ output = nil
+ }
+ }
+}
+
+func (bp *brokerProducer) shutdown() {
+ for !bp.buffer.empty() {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ }
+ }
+ close(bp.output)
+ for response := range bp.responses {
+ bp.handleResponse(response)
+ }
+
+ Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+ if bp.closing != nil {
+ return bp.closing
+ }
+
+ return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
+ Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+
+ for {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ // handling a response can change our state, so re-check some things
+ if reason := bp.needsRetry(msg); reason != nil {
+ return reason
+ } else if !bp.buffer.wouldOverflow(msg) {
+ return nil
+ }
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ return nil
+ }
+ }
+}
+
+func (bp *brokerProducer) rollOver() {
+ bp.timer = nil
+ bp.timerFired = false
+ bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+ if response.err != nil {
+ bp.handleError(response.set, response.err)
+ } else {
+ bp.handleSuccess(response.set, response.res)
+ }
+
+ if bp.buffer.empty() {
+ bp.rollOver() // this can happen if the response invalidated our buffer
+ }
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+ // we iterate through the blocks in the request set, not the response, so that we notice
+ // if the response is missing a block completely
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ if response == nil {
+ // this only happens when RequiredAcks is NoResponse, so we have to assume success
+ bp.parent.returnSuccesses(msgs)
+ return
+ }
+
+ block := response.GetBlock(topic, partition)
+ if block == nil {
+ bp.parent.returnErrors(msgs, ErrIncompleteResponse)
+ return
+ }
+
+ switch block.Err {
+ // Success
+ case ErrNoError:
+ if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+ for _, msg := range msgs {
+ msg.Timestamp = block.Timestamp
+ }
+ }
+ for i, msg := range msgs {
+ msg.Offset = block.Offset + int64(i)
+ }
+ bp.parent.returnSuccesses(msgs)
+ // Retriable errors
+ case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+ ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+ Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+ bp.broker.ID(), topic, partition, block.Err)
+ bp.currentRetries[topic][partition] = block.Err
+ bp.parent.retryMessages(msgs, block.Err)
+ bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+ // Other non-retriable errors
+ default:
+ bp.parent.returnErrors(msgs, block.Err)
+ }
+ })
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+ switch err.(type) {
+ case PacketEncodingError:
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.returnErrors(msgs, err)
+ })
+ default:
+ Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+ bp.parent.abandonBrokerConnection(bp.broker)
+ _ = bp.broker.Close()
+ bp.closing = err
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.retryMessages(msgs, err)
+ })
+ bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.retryMessages(msgs, err)
+ })
+ bp.rollOver()
+ }
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+ var msg *ProducerMessage
+ buf := queue.New()
+
+ for {
+ if buf.Length() == 0 {
+ msg = <-p.retries
+ } else {
+ select {
+ case msg = <-p.retries:
+ case p.input <- buf.Peek().(*ProducerMessage):
+ buf.Remove()
+ continue
+ }
+ }
+
+ if msg == nil {
+ return
+ }
+
+ buf.Add(msg)
+ }
+}
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+ Logger.Println("Producer shutting down.")
+ p.inFlight.Add(1)
+ p.input <- &ProducerMessage{flags: shutdown}
+
+ p.inFlight.Wait()
+
+ if p.ownClient {
+ err := p.client.Close()
+ if err != nil {
+ Logger.Println("producer/shutdown failed to close the embedded client:", err)
+ }
+ }
+
+ close(p.input)
+ close(p.retries)
+ close(p.errors)
+ close(p.successes)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+ msg.clear()
+ pErr := &ProducerError{Msg: msg, Err: err}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.returnError(msg, err)
+ }
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+ for _, msg := range batch {
+ if p.conf.Producer.Return.Successes {
+ msg.clear()
+ p.successes <- msg
+ }
+ p.inFlight.Done()
+ }
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+ if msg.retries >= p.conf.Producer.Retry.Max {
+ p.returnError(msg, err)
+ } else {
+ msg.retries++
+ p.retries <- msg
+ }
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.retryMessage(msg, err)
+ }
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ bp := p.brokers[broker]
+
+ if bp == nil {
+ bp = p.newBrokerProducer(broker)
+ p.brokers[broker] = bp
+ p.brokerRefs[bp] = 0
+ }
+
+ p.brokerRefs[bp]++
+
+ return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ p.brokerRefs[bp]--
+ if p.brokerRefs[bp] == 0 {
+ close(bp)
+ delete(p.brokerRefs, bp)
+
+ if p.brokers[broker] == bp {
+ delete(p.brokers, broker)
+ }
+ }
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ delete(p.brokers, broker)
+}
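
As the `AsyncProducer` documentation above stresses, callers must drain `Errors()` (and `Successes()` when enabled) or the producer deadlocks. A minimal usage sketch, assuming a local broker at `localhost:9092` and a hypothetical topic name:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Producer.Return.Successes = true // we read Successes() below

	// Assumed broker address, for illustration only.
	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, conf)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		// Close flushes buffered messages and drains the result channels.
		if err := producer.Close(); err != nil {
			log.Println("failed to shut down producer cleanly:", err)
		}
	}()

	// "example-topic" is a hypothetical topic name.
	producer.Input() <- &sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	}

	// Read the outcome; with more traffic, sends and reads would normally
	// share a single select loop as the interface docs suggest.
	select {
	case msg := <-producer.Successes():
		log.Printf("delivered to %s/%d at offset %d", msg.Topic, msg.Partition, msg.Offset)
	case perr := <-producer.Errors():
		log.Printf("delivery failed: %v", perr.Err)
	}
}
```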
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
new file mode 100644
index 00000000..f57a6909
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -0,0 +1,685 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+ id int32
+ addr string
+
+ conf *Config
+ correlationID int32
+ conn net.Conn
+ connErr error
+ lock sync.Mutex
+ opened int32
+
+ responses chan responsePromise
+ done chan bool
+
+ incomingByteRate metrics.Meter
+ requestRate metrics.Meter
+ requestSize metrics.Histogram
+ requestLatency metrics.Histogram
+ outgoingByteRate metrics.Meter
+ responseRate metrics.Meter
+ responseSize metrics.Histogram
+ brokerIncomingByteRate metrics.Meter
+ brokerRequestRate metrics.Meter
+ brokerRequestSize metrics.Histogram
+ brokerRequestLatency metrics.Histogram
+ brokerOutgoingByteRate metrics.Meter
+ brokerResponseRate metrics.Meter
+ brokerResponseSize metrics.Histogram
+}
+
+type responsePromise struct {
+ requestTime time.Time
+ correlationID int32
+ packets chan []byte
+ errors chan error
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect, you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+ return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
+func (b *Broker) Open(conf *Config) error {
+ if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
+ return ErrAlreadyConnected
+ }
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ err := conf.Validate()
+ if err != nil {
+ return err
+ }
+
+ b.lock.Lock()
+
+ go withRecover(func() {
+ defer b.lock.Unlock()
+
+ dialer := net.Dialer{
+ Timeout: conf.Net.DialTimeout,
+ KeepAlive: conf.Net.KeepAlive,
+ }
+
+ if conf.Net.TLS.Enable {
+ b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
+ } else {
+ b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+ }
+ if b.connErr != nil {
+ Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+ b.conn = nil
+ atomic.StoreInt32(&b.opened, 0)
+ return
+ }
+ b.conn = newBufConn(b.conn)
+
+ b.conf = conf
+
+ // Create or reuse the global metrics shared between brokers
+ b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
+ b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
+ b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
+ b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
+ b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
+ b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
+ b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
+		// Do not gather broker-specific metrics for seed brokers (only used during bootstrap) because they
+		// share the same id (-1) and are already exposed through the global metrics above
+ if b.id >= 0 {
+ b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
+ b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
+ b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
+ b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
+ b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
+ b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
+ b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
+ }
+
+ if conf.Net.SASL.Enable {
+ b.connErr = b.sendAndReceiveSASLPlainAuth()
+ if b.connErr != nil {
+ err = b.conn.Close()
+ if err == nil {
+ Logger.Printf("Closed connection to broker %s\n", b.addr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+ }
+ b.conn = nil
+ atomic.StoreInt32(&b.opened, 0)
+ return
+ }
+ }
+
+ b.done = make(chan bool)
+ b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+ if b.id >= 0 {
+ Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+ } else {
+ Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+ }
+ go withRecover(b.responseReceiver)
+ })
+
+ return nil
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ return b.conn != nil, b.connErr
+}
+
+func (b *Broker) Close() error {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ return ErrNotConnected
+ }
+
+ close(b.responses)
+ <-b.done
+
+ err := b.conn.Close()
+
+ b.conn = nil
+ b.connErr = nil
+ b.done = nil
+ b.responses = nil
+
+ if err == nil {
+ Logger.Printf("Closed connection to broker %s\n", b.addr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+ }
+
+ atomic.StoreInt32(&b.opened, 0)
+
+ return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+ return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+ return b.addr
+}
+
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+ response := new(MetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+ response := new(ConsumerMetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+ response := new(OffsetResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+ var response *ProduceResponse
+ var err error
+
+ if request.RequiredAcks == NoResponse {
+ err = b.sendAndReceive(request, nil)
+ } else {
+ response = new(ProduceResponse)
+ err = b.sendAndReceive(request, response)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+ response := new(FetchResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+ response := new(OffsetCommitResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+ response := new(OffsetFetchResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+ response := new(JoinGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+ response := new(SyncGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+ response := new(LeaveGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+ response := new(HeartbeatResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+ response := new(ListGroupsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+ response := new(DescribeGroupsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+ response := new(ApiVersionsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ if b.connErr != nil {
+ return nil, b.connErr
+ }
+ return nil, ErrNotConnected
+ }
+
+ if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+ return nil, ErrUnsupportedVersion
+ }
+
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return nil, err
+ }
+
+ err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+ if err != nil {
+ return nil, err
+ }
+
+ requestTime := time.Now()
+ bytes, err := b.conn.Write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ if err != nil {
+ return nil, err
+ }
+ b.correlationID++
+
+ if !promiseResponse {
+ // Record request latency without the response
+ b.updateRequestLatencyMetrics(time.Since(requestTime))
+ return nil, nil
+ }
+
+ promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
+ b.responses <- promise
+
+ return &promise, nil
+}
+
+func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
+ promise, err := b.send(req, res != nil)
+
+ if err != nil {
+ return err
+ }
+
+ if promise == nil {
+ return nil
+ }
+
+ select {
+ case buf := <-promise.packets:
+ return versionedDecode(buf, res, req.version())
+ case err = <-promise.errors:
+ return err
+ }
+}
+
+func (b *Broker) decode(pd packetDecoder) (err error) {
+ b.id, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ host, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ port, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+ if _, _, err := net.SplitHostPort(b.addr); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b *Broker) encode(pe packetEncoder) (err error) {
+
+ host, portstr, err := net.SplitHostPort(b.addr)
+ if err != nil {
+ return err
+ }
+ port, err := strconv.Atoi(portstr)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(b.id)
+
+ err = pe.putString(host)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(int32(port))
+
+ return nil
+}
+
+func (b *Broker) responseReceiver() {
+ var dead error
+ header := make([]byte, 8)
+ for response := range b.responses {
+ if dead != nil {
+ response.errors <- dead
+ continue
+ }
+
+ err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ bytesReadHeader, err := io.ReadFull(b.conn, header)
+ requestLatency := time.Since(response.requestTime)
+ if err != nil {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ decodedHeader := responseHeader{}
+ err = decode(header, &decodedHeader)
+ if err != nil {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ dead = err
+ response.errors <- err
+ continue
+ }
+ if decodedHeader.correlationID != response.correlationID {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ // TODO if decoded ID < cur ID, discard until we catch up
+ // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+ dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
+ response.errors <- dead
+ continue
+ }
+
+ buf := make([]byte, decodedHeader.length-4)
+ bytesReadBody, err := io.ReadFull(b.conn, buf)
+ b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ response.packets <- buf
+ }
+ close(b.done)
+}
+
+func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
+ rb := &SaslHandshakeRequest{"PLAIN"}
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return err
+ }
+
+ err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+ if err != nil {
+ return err
+ }
+
+ requestTime := time.Now()
+ bytes, err := b.conn.Write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ if err != nil {
+ Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
+ return err
+ }
+ b.correlationID++
+ //wait for the response
+ header := make([]byte, 8) // response header
+ _, err = io.ReadFull(b.conn, header)
+ if err != nil {
+ Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
+ return err
+ }
+ length := binary.BigEndian.Uint32(header[:4])
+ payload := make([]byte, length-4)
+ n, err := io.ReadFull(b.conn, payload)
+ if err != nil {
+ Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
+ return err
+ }
+ b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+ res := &SaslHandshakeResponse{}
+ err = versionedDecode(payload, res, 0)
+ if err != nil {
+ Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
+ return err
+ }
+ if res.Err != ErrNoError {
+ Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
+ return res.Err
+ }
+ Logger.Print("Successful SASL handshake")
+ return nil
+}
+
+// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
+// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
+//
+// In SASL Plain, Kafka expects the auth header to be in the following format
+// Message format (from https://tools.ietf.org/html/rfc4616):
+//
+// message = [authzid] UTF8NUL authcid UTF8NUL passwd
+// authcid = 1*SAFE ; MUST accept up to 255 octets
+// authzid = 1*SAFE ; MUST accept up to 255 octets
+// passwd = 1*SAFE ; MUST accept up to 255 octets
+// UTF8NUL = %x00 ; UTF-8 encoded NUL character
+//
+// SAFE = UTF1 / UTF2 / UTF3 / UTF4
+// ;; any UTF-8 encoded Unicode character except NUL
+//
+// When credentials are valid, Kafka returns a 4 byte array of null characters.
+// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
+// of responding to bad credentials, but that's how it's done today.
+func (b *Broker) sendAndReceiveSASLPlainAuth() error {
+ if b.conf.Net.SASL.Handshake {
+ handshakeErr := b.sendAndReceiveSASLPlainHandshake()
+ if handshakeErr != nil {
+ Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
+ return handshakeErr
+ }
+ }
+ length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+ authBytes := make([]byte, length+4) //4 byte length header + auth data
+ binary.BigEndian.PutUint32(authBytes, uint32(length))
+ copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
+
+ err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+ if err != nil {
+ Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ requestTime := time.Now()
+ bytesWritten, err := b.conn.Write(authBytes)
+ b.updateOutgoingCommunicationMetrics(bytesWritten)
+ if err != nil {
+ Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ header := make([]byte, 4)
+ n, err := io.ReadFull(b.conn, header)
+ b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+ // If the credentials are valid, we would get a 4 byte response filled with null characters.
+ // Otherwise, the broker closes the connection and we get an EOF
+ if err != nil {
+ Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+ return nil
+}
+
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+ b.updateRequestLatencyMetrics(requestLatency)
+ b.responseRate.Mark(1)
+ if b.brokerResponseRate != nil {
+ b.brokerResponseRate.Mark(1)
+ }
+ responseSize := int64(bytes)
+ b.incomingByteRate.Mark(responseSize)
+ if b.brokerIncomingByteRate != nil {
+ b.brokerIncomingByteRate.Mark(responseSize)
+ }
+ b.responseSize.Update(responseSize)
+ if b.brokerResponseSize != nil {
+ b.brokerResponseSize.Update(responseSize)
+ }
+}
+
+func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
+ requestLatencyInMs := int64(requestLatency / time.Millisecond)
+ b.requestLatency.Update(requestLatencyInMs)
+ if b.brokerRequestLatency != nil {
+ b.brokerRequestLatency.Update(requestLatencyInMs)
+ }
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+ b.requestRate.Mark(1)
+ if b.brokerRequestRate != nil {
+ b.brokerRequestRate.Mark(1)
+ }
+ requestSize := int64(bytes)
+ b.outgoingByteRate.Mark(requestSize)
+ if b.brokerOutgoingByteRate != nil {
+ b.brokerOutgoingByteRate.Mark(requestSize)
+ }
+ b.requestSize.Update(requestSize)
+ if b.brokerRequestSize != nil {
+ b.brokerRequestSize.Update(requestSize)
+ }
+}
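
As the `Open` documentation above explains, opening a broker connection does not block; a follow-up call to `Connected()` makes it effectively synchronous because it waits on the same lock the dial goroutine holds. A minimal sketch of that pattern, again assuming a local broker address:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Assumed broker address, for illustration only.
	broker := sarama.NewBroker("localhost:9092")

	// Open returns immediately; the TCP connection is made in the background.
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}

	// Connected blocks until the background dial has finished and reports
	// whether it succeeded (along with any connection error).
	ok, err := broker.Connected()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("connected to %s: %v", broker.Addr(), ok)

	// Requests such as GetMetadata also wait for the connection before sending.
	meta, err := broker.GetMetadata(&sarama.MetadataRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, topic := range meta.Topics {
		log.Println("topic:", topic.Name)
	}

	if err := broker.Close(); err != nil {
		log.Println("close:", err)
	}
}
```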
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
new file mode 100644
index 00000000..45de3973
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -0,0 +1,779 @@
+package sarama
+
+import (
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. It is safe to share a client amongst many
+// users; however, Kafka processes requests from a single client strictly in serial,
+// so it is generally more efficient to use the default of one client per producer/consumer.
+type Client interface {
+ // Config returns the Config struct of the client. This struct should not be
+ // altered after it has been created.
+ Config() *Config
+
+ // Brokers returns the current set of active brokers as retrieved from cluster metadata.
+ Brokers() []*Broker
+
+ // Topics returns the set of available topics as retrieved from cluster metadata.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ Partitions(topic string) ([]int32, error)
+
+ // WritablePartitions returns the sorted list of all writable partition IDs for
+ // the given topic, where "writable" means "having a valid leader accepting
+ // writes".
+ WritablePartitions(topic string) ([]int32, error)
+
+ // Leader returns the broker object that is the leader of the current
+ // topic/partition, as determined by querying the cluster metadata.
+ Leader(topic string, partitionID int32) (*Broker, error)
+
+ // Replicas returns the set of all replica IDs for the given partition.
+ Replicas(topic string, partitionID int32) ([]int32, error)
+
+ // InSyncReplicas returns the set of all in-sync replica IDs for the given
+ // partition. In-sync replicas are replicas which are fully caught up with
+ // the partition leader.
+ InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
+ // RefreshMetadata takes a list of topics and queries the cluster to refresh the
+ // available metadata for those topics. If no topics are provided, it will refresh
+ // metadata for all topics.
+ RefreshMetadata(topics ...string) error
+
+ // GetOffset queries the cluster to get the most recent available offset at the
+ // given time on the topic/partition combination. Time should be OffsetOldest for
+ // the earliest available offset, OffsetNewest for the offset of the message that
+ // will be produced next, or a time.
+ GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+ // Coordinator returns the coordinating broker for a consumer group. It will
+ // return a locally cached value if it's available. You can call
+ // RefreshCoordinator to update the cached value. This function only works on
+ // Kafka 0.8.2 and higher.
+ Coordinator(consumerGroup string) (*Broker, error)
+
+ // RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+ // in local cache. This function only works on Kafka 0.8.2 and higher.
+ RefreshCoordinator(consumerGroup string) error
+
+ // Close shuts down all broker connections managed by this client. It is required
+ // to call this function before a client object passes out of scope, as it will
+ // otherwise leak memory. You must close any Producers or Consumers using a client
+ // before you close the client.
+ Close() error
+
+ // Closed returns true if the client has already had Close called on it
+ Closed() bool
+}
+
+const (
+ // OffsetNewest stands for the log head offset, i.e. the offset that will be
+ // assigned to the next message that will be produced to the partition. You
+ // can send this to a client's GetOffset method to get this offset, or when
+ // calling ConsumePartition to start consuming new messages.
+ OffsetNewest int64 = -1
+ // OffsetOldest stands for the oldest offset available on the broker for a
+ // partition. You can send this to a client's GetOffset method to get this
+ // offset, or when calling ConsumePartition to start consuming from the
+ // oldest offset that is still available on the broker.
+ OffsetOldest int64 = -2
+)
+
+type client struct {
+ conf *Config
+ closer, closed chan none // for shutting down background metadata updater
+
+ // the broker addresses given to us through the constructor are not guaranteed to be returned in
+ // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
+ // so we store them separately
+ seedBrokers []*Broker
+ deadSeeds []*Broker
+
+ brokers map[int32]*Broker // maps broker ids to brokers
+ metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+ coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
+
+ // If the number of partitions is large, we can get some churn calling cachedPartitions,
+ // so the result is cached. It is important to update this value whenever metadata is changed
+ cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+ lock sync.RWMutex // protects access to the maps that hold cluster state.
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
+func NewClient(addrs []string, conf *Config) (Client, error) {
+ Logger.Println("Initializing new client")
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ if err := conf.Validate(); err != nil {
+ return nil, err
+ }
+
+ if len(addrs) < 1 {
+ return nil, ConfigurationError("You must provide at least one broker address")
+ }
+
+ client := &client{
+ conf: conf,
+ closer: make(chan none),
+ closed: make(chan none),
+ brokers: make(map[int32]*Broker),
+ metadata: make(map[string]map[int32]*PartitionMetadata),
+ cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+ coordinators: make(map[string]int32),
+ }
+
+ random := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for _, index := range random.Perm(len(addrs)) {
+ client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+ }
+
+ // do an initial fetch of all cluster metadata by specifying an empty list of topics
+ err := client.RefreshMetadata()
+ switch err {
+ case nil:
+ break
+ case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
+ // indicates that maybe part of the cluster is down, but is not fatal to creating the client
+ Logger.Println(err)
+ default:
+ close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+ _ = client.Close()
+ return nil, err
+ }
+ go withRecover(client.backgroundMetadataUpdater)
+
+ Logger.Println("Successfully initialized new client")
+
+ return client, nil
+}
+
+func (client *client) Config() *Config {
+ return client.conf
+}
+
+func (client *client) Brokers() []*Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ brokers := make([]*Broker, 0)
+ for _, broker := range client.brokers {
+ brokers = append(brokers, broker)
+ }
+ return brokers
+}
+
+func (client *client) Close() error {
+ if client.Closed() {
+ // Chances are this is being called from a defer() and the error will go unobserved
+ // so we go ahead and log the event in this case.
+ Logger.Printf("Close() called on already closed client")
+ return ErrClosedClient
+ }
+
+ // shutdown and wait for the background thread before we take the lock, to avoid races
+ close(client.closer)
+ <-client.closed
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ Logger.Println("Closing Client")
+
+ for _, broker := range client.brokers {
+ safeAsyncClose(broker)
+ }
+
+ for _, broker := range client.seedBrokers {
+ safeAsyncClose(broker)
+ }
+
+ client.brokers = nil
+ client.metadata = nil
+
+ return nil
+}
+
+func (client *client) Closed() bool {
+ return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ ret := make([]string, 0, len(client.metadata))
+ for topic := range client.metadata {
+ ret = append(ret, topic)
+ }
+
+ return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, allPartitions)
+
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, allPartitions)
+ }
+
+ if partitions == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, writablePartitions)
+
+ // len==0 catches when it's nil (no such topic) and the odd case when every single
+ // partition is undergoing leader election simultaneously. Callers have to be able to handle
+ // this function returning an empty slice (which is a valid return value) but catching it
+ // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
+ // a metadata refresh as a nicety so callers can just try again and don't have to manually
+ // trigger a refresh (otherwise they'd just keep getting a stale cached copy).
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, writablePartitions)
+ }
+
+ if partitions == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ if metadata.Err == ErrReplicaNotAvailable {
+ return nil, metadata.Err
+ }
+ return dupeAndSort(metadata.Replicas), nil
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ if metadata.Err == ErrReplicaNotAvailable {
+ return nil, metadata.Err
+ }
+ return dupeAndSort(metadata.Isr), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ leader, err := client.cachedLeader(topic, partitionID)
+
+ if leader == nil {
+ err = client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ leader, err = client.cachedLeader(topic, partitionID)
+ }
+
+ return leader, err
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
+ // error. This handles the case by returning an error instead of sending it
+ // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
+ for _, topic := range topics {
+ if len(topic) == 0 {
+ return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+ }
+ }
+
+ return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
+ if client.Closed() {
+ return -1, ErrClosedClient
+ }
+
+ offset, err := client.getOffset(topic, partitionID, time)
+
+ if err != nil {
+ if err := client.RefreshMetadata(topic); err != nil {
+ return -1, err
+ }
+ return client.getOffset(topic, partitionID, time)
+ }
+
+ return offset, err
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ coordinator := client.cachedCoordinator(consumerGroup)
+
+ if coordinator == nil {
+ if err := client.RefreshCoordinator(consumerGroup); err != nil {
+ return nil, err
+ }
+ coordinator = client.cachedCoordinator(consumerGroup)
+ }
+
+ if coordinator == nil {
+ return nil, ErrConsumerCoordinatorNotAvailable
+ }
+
+ _ = coordinator.Open(client.conf)
+ return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
+ if err != nil {
+ return err
+ }
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ client.registerBroker(response.Coordinator)
+ client.coordinators[consumerGroup] = response.Coordinator.ID()
+ return nil
+}
+
+// private broker management helpers
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If the broker's ID is already known under a stale address, the old Broker
+// instance is closed and replaced. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+ if client.brokers[broker.ID()] == nil {
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+ } else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+ safeAsyncClose(client.brokers[broker.ID()])
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+ }
+}
+
+// deregisterBroker removes a broker from the seedBrokers list, and if it's
+// not a seed broker, removes it from the brokers map completely.
+func (client *client) deregisterBroker(broker *Broker) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+ client.deadSeeds = append(client.deadSeeds, broker)
+ client.seedBrokers = client.seedBrokers[1:]
+ } else {
+ // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
+ // but we really shouldn't have to; once that loop is made better this case can be
+ // removed, and the function generally can be renamed from `deregisterBroker` to
+ // `nextSeedBroker` or something
+ Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+ delete(client.brokers, broker.ID())
+ }
+}
+
+func (client *client) resurrectDeadBrokers() {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+ client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+ client.deadSeeds = nil
+}
+
+func (client *client) any() *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ if len(client.seedBrokers) > 0 {
+ _ = client.seedBrokers[0].Open(client.conf)
+ return client.seedBrokers[0]
+ }
+
+ // not guaranteed to be random *or* deterministic
+ for _, broker := range client.brokers {
+ _ = broker.Open(client.conf)
+ return broker
+ }
+
+ return nil
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+ allPartitions partitionType = iota
+ writablePartitions
+ // If you add any more types, update the partition cache in update()
+
+ // Ensure this is the last partition type value
+ maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ return partitions[partitionID]
+ }
+
+ return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions, exists := client.cachedPartitionsResults[topic]
+
+ if !exists {
+ return nil
+ }
+ return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+ partitions := client.metadata[topic]
+
+ if partitions == nil {
+ return nil
+ }
+
+ ret := make([]int32, 0, len(partitions))
+ for _, partition := range partitions {
+ if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
+ continue
+ }
+ ret = append(ret, partition.ID)
+ }
+
+ sort.Sort(int32Slice(ret))
+ return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ metadata, ok := partitions[partitionID]
+ if ok {
+ if metadata.Err == ErrLeaderNotAvailable {
+ return nil, ErrLeaderNotAvailable
+ }
+ b := client.brokers[metadata.Leader]
+ if b == nil {
+ return nil, ErrLeaderNotAvailable
+ }
+ _ = b.Open(client.conf)
+ return b, nil
+ }
+ }
+
+ return nil, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
+ broker, err := client.Leader(topic, partitionID)
+ if err != nil {
+ return -1, err
+ }
+
+ request := &OffsetRequest{}
+ if client.conf.Version.IsAtLeast(V0_10_1_0) {
+ request.Version = 1
+ }
+ request.AddBlock(topic, partitionID, time, 1)
+
+ response, err := broker.GetAvailableOffsets(request)
+ if err != nil {
+ _ = broker.Close()
+ return -1, err
+ }
+
+ block := response.GetBlock(topic, partitionID)
+ if block == nil {
+ _ = broker.Close()
+ return -1, ErrIncompleteResponse
+ }
+ if block.Err != ErrNoError {
+ return -1, block.Err
+ }
+ if len(block.Offsets) != 1 {
+ return -1, ErrOffsetOutOfRange
+ }
+
+ return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+ defer close(client.closed)
+
+ if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+ return
+ }
+
+ ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := client.RefreshMetadata(); err != nil {
+ Logger.Println("Client background metadata update:", err)
+ }
+ case <-client.closer:
+ return
+ }
+ }
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
+ retry := func(err error) error {
+ if attemptsRemaining > 0 {
+ Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(client.conf.Metadata.Retry.Backoff)
+ return client.tryRefreshMetadata(topics, attemptsRemaining-1)
+ }
+ return err
+ }
+
+ for broker := client.any(); broker != nil; broker = client.any() {
+ if len(topics) > 0 {
+ Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+ } else {
+ Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+ }
+ response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
+
+ switch err.(type) {
+ case nil:
+ // valid response, use it
+ shouldRetry, err := client.updateMetadata(response)
+ if shouldRetry {
+ Logger.Println("client/metadata found some partitions to be leaderless")
+ return retry(err) // note: err can be nil
+ }
+ return err
+
+ case PacketEncodingError:
+ // didn't even send, return the error
+ return err
+ default:
+ // some other error, remove that broker and try again
+ Logger.Println("client/metadata got error from broker while fetching metadata:", err)
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ }
+ }
+
+ Logger.Println("client/metadata no available broker to send metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(ErrOutOfBrokers)
+}
+
+// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
+func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ // For all the brokers we received:
+ // - if it is a new ID, save it
+ // - if it is an existing ID, but the address we have is stale, discard the old one and save it
+ // - otherwise ignore it, replacing our existing one would just bounce the connection
+ for _, broker := range data.Brokers {
+ client.registerBroker(broker)
+ }
+
+ for _, topic := range data.Topics {
+ delete(client.metadata, topic.Name)
+ delete(client.cachedPartitionsResults, topic.Name)
+
+ switch topic.Err {
+ case ErrNoError:
+ break
+ case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+ err = topic.Err
+ continue
+ case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+ err = topic.Err
+ retry = true
+ continue
+ case ErrLeaderNotAvailable: // retry, but store partial partition results
+ retry = true
+ break
+ default: // don't retry, don't store partial results
+ Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+ err = topic.Err
+ continue
+ }
+
+ client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+ for _, partition := range topic.Partitions {
+ client.metadata[topic.Name][partition.ID] = partition
+ if partition.Err == ErrLeaderNotAvailable {
+ retry = true
+ }
+ }
+
+ var partitionCache [maxPartitionIndex][]int32
+ partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+ partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+ client.cachedPartitionsResults[topic.Name] = partitionCache
+ }
+
+ return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+ return client.brokers[coordinatorID]
+ }
+ return nil
+}
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
+ retry := func(err error) (*ConsumerMetadataResponse, error) {
+ if attemptsRemaining > 0 {
+ Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(client.conf.Metadata.Retry.Backoff)
+ return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
+ }
+ return nil, err
+ }
+
+ for broker := client.any(); broker != nil; broker = client.any() {
+ Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
+
+ request := new(ConsumerMetadataRequest)
+ request.ConsumerGroup = consumerGroup
+
+ response, err := broker.GetConsumerMetadata(request)
+
+ if err != nil {
+ Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+ switch err.(type) {
+ case PacketEncodingError:
+ return nil, err
+ default:
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ continue
+ }
+ }
+
+ switch response.Err {
+ case ErrNoError:
+ Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
+ return response, nil
+
+ case ErrConsumerCoordinatorNotAvailable:
+ Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
+
+ // This is very ugly, but this scenario will only happen once per cluster.
+ // The __consumer_offsets topic only has to be created one time.
+			// The number of partitions is not configurable, but partition 0 should always exist.
+ if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+ Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+ time.Sleep(2 * time.Second)
+ }
+
+ return retry(ErrConsumerCoordinatorNotAvailable)
+ default:
+ return nil, response.Err
+ }
+ }
+
+ Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(ErrOutOfBrokers)
+}
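
For orientation (not part of this diff): a minimal sketch of how the `Client` methods vendored above - `Partitions`, `GetOffset`, and the metadata refresh they fall back on - are typically used. The broker address and topic name are placeholders, and the import path assumes the canonical `github.com/Shopify/sarama` module.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()

	// NewClient dials the seed brokers and performs the initial metadata refresh
	// implemented by tryRefreshMetadata above; the address is a placeholder.
	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Partitions serves from the partition cache and refreshes metadata on a miss.
	partitions, err := client.Partitions("some-topic")
	if err != nil {
		log.Fatal(err)
	}

	for _, p := range partitions {
		// GetOffset asks the partition leader for the newest available offset.
		offset, err := client.GetOffset("some-topic", p, sarama.OffsetNewest)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("partition %d: newest offset %d\n", p, offset)
	}
}
```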
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
new file mode 100644
index 00000000..606a4fab
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -0,0 +1,423 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "regexp"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+const defaultClientID = "sarama"
+
+var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+ // Net is the namespace for network-level properties used by the Broker, and
+ // shared by the Client/Producer/Consumer.
+ Net struct {
+ // How many outstanding requests a connection is allowed to have before
+ // sending on it blocks (default 5).
+ MaxOpenRequests int
+
+ // All three of the below configurations are similar to the
+ // `socket.timeout.ms` setting in JVM kafka. All of them default
+ // to 30 seconds.
+ DialTimeout time.Duration // How long to wait for the initial connection.
+ ReadTimeout time.Duration // How long to wait for a response.
+ WriteTimeout time.Duration // How long to wait for a transmit.
+
+ TLS struct {
+ // Whether or not to use TLS when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // The TLS configuration to use for secure connections if
+ // enabled (defaults to nil).
+ Config *tls.Config
+ }
+
+		// SASL-based authentication with the broker. While there are multiple SASL authentication methods,
+		// the current implementation is limited to plaintext (SASL/PLAIN) authentication.
+ SASL struct {
+ // Whether or not to use SASL authentication when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // Whether or not to send the Kafka SASL handshake first if enabled
+ // (defaults to true). You should only set this to false if you're using
+ // a non-Kafka SASL proxy.
+ Handshake bool
+			// Username and password for SASL/PLAIN authentication.
+ User string
+ Password string
+ }
+
+ // KeepAlive specifies the keep-alive period for an active network connection.
+ // If zero, keep-alives are disabled. (default is 0: disabled).
+ KeepAlive time.Duration
+ }
+
+ // Metadata is the namespace for metadata management properties used by the
+ // Client, and shared by the Producer/Consumer.
+ Metadata struct {
+ Retry struct {
+ // The total number of times to retry a metadata request when the
+ // cluster is in the middle of a leader election (default 3).
+ Max int
+ // How long to wait for leader election to occur before retrying
+ // (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+ Backoff time.Duration
+ }
+ // How frequently to refresh the cluster metadata in the background.
+ // Defaults to 10 minutes. Set to 0 to disable. Similar to
+ // `topic.metadata.refresh.interval.ms` in the JVM version.
+ RefreshFrequency time.Duration
+ }
+
+ // Producer is the namespace for configuration related to producing messages,
+ // used by the Producer.
+ Producer struct {
+ // The maximum permitted size of a message (defaults to 1000000). Should be
+ // set equal to or smaller than the broker's `message.max.bytes`.
+ MaxMessageBytes int
+ // The level of acknowledgement reliability needed from the broker (defaults
+ // to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+ // JVM producer.
+ RequiredAcks RequiredAcks
+		// The maximum duration the broker will wait for the receipt of the number of
+ // RequiredAcks (defaults to 10 seconds). This is only relevant when
+ // RequiredAcks is set to WaitForAll or a number > 1. Only supports
+ // millisecond resolution, nanoseconds will be truncated. Equivalent to
+ // the JVM producer's `request.timeout.ms` setting.
+ Timeout time.Duration
+ // The type of compression to use on messages (defaults to no compression).
+ // Similar to `compression.codec` setting of the JVM producer.
+ Compression CompressionCodec
+ // Generates partitioners for choosing the partition to send messages to
+ // (defaults to hashing the message key). Similar to the `partitioner.class`
+ // setting for the JVM producer.
+ Partitioner PartitionerConstructor
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from the respective channels to prevent deadlock. If,
+ // however, this config is used to create a `SyncProducer`, both must be set
+ // to true and you shall not read from the channels since the producer does
+ // this internally.
+ Return struct {
+ // If enabled, successfully delivered messages will be returned on the
+ // Successes channel (default disabled).
+ Successes bool
+
+ // If enabled, messages that failed to deliver will be returned on the
+			// Errors channel, including the error that caused them to fail (default enabled).
+ Errors bool
+ }
+
+ // The following config options control how often messages are batched up and
+ // sent to the broker. By default, messages are sent as fast as possible, and
+ // all messages received while the current batch is in-flight are placed
+ // into the subsequent batch.
+ Flush struct {
+ // The best-effort number of bytes needed to trigger a flush. Use the
+ // global sarama.MaxRequestSize to set a hard upper limit.
+ Bytes int
+ // The best-effort number of messages needed to trigger a flush. Use
+ // `MaxMessages` to set a hard upper limit.
+ Messages int
+ // The best-effort frequency of flushes. Equivalent to
+ // `queue.buffering.max.ms` setting of JVM producer.
+ Frequency time.Duration
+ // The maximum number of messages the producer will send in a single
+ // broker request. Defaults to 0 for unlimited. Similar to
+ // `queue.buffering.max.messages` in the JVM producer.
+ MaxMessages int
+ }
+
+ Retry struct {
+ // The total number of times to retry sending a message (default 3).
+ // Similar to the `message.send.max.retries` setting of the JVM producer.
+ Max int
+ // How long to wait for the cluster to settle between retries
+ // (default 100ms). Similar to the `retry.backoff.ms` setting of the
+ // JVM producer.
+ Backoff time.Duration
+ }
+ }
+
+ // Consumer is the namespace for configuration related to consuming messages,
+ // used by the Consumer.
+ //
+ // Note that Sarama's Consumer type does not currently support automatic
+ // consumer-group rebalancing and offset tracking. For Zookeeper-based
+ // tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
+ // library builds on Sarama to add this support. For Kafka-based tracking
+ // (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
+ // builds on Sarama to add this support.
+ Consumer struct {
+ Retry struct {
+ // How long to wait after a failing to read from a partition before
+ // trying again (default 2s).
+ Backoff time.Duration
+ }
+
+ // Fetch is the namespace for controlling how many bytes are retrieved by any
+ // given request.
+ Fetch struct {
+ // The minimum number of message bytes to fetch in a request - the broker
+ // will wait until at least this many are available. The default is 1,
+ // as 0 causes the consumer to spin when no messages are available.
+ // Equivalent to the JVM's `fetch.min.bytes`.
+ Min int32
+ // The default number of message bytes to fetch from the broker in each
+ // request (default 32768). This should be larger than the majority of
+ // your messages, or else the consumer will spend a lot of time
+ // negotiating sizes and not actually consuming. Similar to the JVM's
+ // `fetch.message.max.bytes`.
+ Default int32
+ // The maximum number of message bytes to fetch from the broker in a
+ // single request. Messages larger than this will return
+ // ErrMessageTooLarge and will not be consumable, so you must be sure
+ // this is at least as large as your largest message. Defaults to 0
+ // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+ // global `sarama.MaxResponseSize` still applies.
+ Max int32
+ }
+ // The maximum amount of time the broker will wait for Consumer.Fetch.Min
+ // bytes to become available before it returns fewer than that anyways. The
+ // default is 250ms, since 0 causes the consumer to spin when no events are
+ // available. 100-500ms is a reasonable range for most cases. Kafka only
+ // supports precision up to milliseconds; nanoseconds will be truncated.
+ // Equivalent to the JVM's `fetch.wait.max.ms`.
+ MaxWaitTime time.Duration
+
+ // The maximum amount of time the consumer expects a message takes to process
+ // for the user. If writing to the Messages channel takes longer than this,
+ // that partition will stop fetching more messages until it can proceed again.
+ // Note that, since the Messages channel is buffered, the actual grace time is
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+ MaxProcessingTime time.Duration
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from them to prevent deadlock.
+ Return struct {
+ // If enabled, any errors that occurred while consuming are returned on
+ // the Errors channel (default disabled).
+ Errors bool
+ }
+
+ // Offsets specifies configuration for how and when to commit consumed
+ // offsets. This currently requires the manual use of an OffsetManager
+ // but will eventually be automated.
+ Offsets struct {
+ // How frequently to commit updated offsets. Defaults to 1s.
+ CommitInterval time.Duration
+
+ // The initial offset to use if no offset was previously committed.
+ // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+ Initial int64
+
+ // The retention duration for committed offsets. If zero, disabled
+ // (in which case the `offsets.retention.minutes` option on the
+ // broker will be used). Kafka only supports precision up to
+ // milliseconds; nanoseconds will be truncated. Requires Kafka
+ // broker version 0.9.0 or later.
+ // (default is 0: disabled).
+ Retention time.Duration
+ }
+ }
+
+ // A user-provided string sent with every request to the brokers for logging,
+ // debugging, and auditing purposes. Defaults to "sarama", but you should
+ // probably set it to something specific to your application.
+ ClientID string
+ // The number of events to buffer in internal and external channels. This
+ // permits the producer and consumer to continue processing some messages
+ // in the background while user code is working, greatly improving throughput.
+ // Defaults to 256.
+ ChannelBufferSize int
+ // The version of Kafka that Sarama will assume it is running against.
+ // Defaults to the oldest supported stable version. Since Kafka provides
+ // backwards-compatibility, setting it to a version older than you have
+ // will not break anything, although it may prevent you from using the
+ // latest features. Setting it to a version greater than you are actually
+ // running may lead to random breakage.
+ Version KafkaVersion
+ // The registry to define metrics into.
+ // Defaults to a local registry.
+ // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+ // prior to starting Sarama.
+ // See Examples on how to use the metrics registry
+ MetricRegistry metrics.Registry
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+ c := &Config{}
+
+ c.Net.MaxOpenRequests = 5
+ c.Net.DialTimeout = 30 * time.Second
+ c.Net.ReadTimeout = 30 * time.Second
+ c.Net.WriteTimeout = 30 * time.Second
+ c.Net.SASL.Handshake = true
+
+ c.Metadata.Retry.Max = 3
+ c.Metadata.Retry.Backoff = 250 * time.Millisecond
+ c.Metadata.RefreshFrequency = 10 * time.Minute
+
+ c.Producer.MaxMessageBytes = 1000000
+ c.Producer.RequiredAcks = WaitForLocal
+ c.Producer.Timeout = 10 * time.Second
+ c.Producer.Partitioner = NewHashPartitioner
+ c.Producer.Retry.Max = 3
+ c.Producer.Retry.Backoff = 100 * time.Millisecond
+ c.Producer.Return.Errors = true
+
+ c.Consumer.Fetch.Min = 1
+ c.Consumer.Fetch.Default = 32768
+ c.Consumer.Retry.Backoff = 2 * time.Second
+ c.Consumer.MaxWaitTime = 250 * time.Millisecond
+ c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+ c.Consumer.Return.Errors = false
+ c.Consumer.Offsets.CommitInterval = 1 * time.Second
+ c.Consumer.Offsets.Initial = OffsetNewest
+
+ c.ClientID = defaultClientID
+ c.ChannelBufferSize = 256
+ c.Version = minVersion
+ c.MetricRegistry = metrics.NewRegistry()
+
+ return c
+}
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+ // some configuration values should be warned on but not fail completely, do those first
+ if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil {
+ Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+ }
+ if c.Net.SASL.Enable == false {
+ if c.Net.SASL.User != "" {
+ Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+ }
+ if c.Net.SASL.Password != "" {
+ Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+ }
+ }
+ if c.Producer.RequiredAcks > 1 {
+ Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+ }
+ if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+ }
+ if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+ }
+ if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+ Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+ }
+ if c.Producer.Timeout%time.Millisecond != 0 {
+ Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+ }
+ if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+ Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+ }
+ if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+ Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+ Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.ClientID == defaultClientID {
+ Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+ }
+
+ // validate Net values
+ switch {
+ case c.Net.MaxOpenRequests <= 0:
+ return ConfigurationError("Net.MaxOpenRequests must be > 0")
+ case c.Net.DialTimeout <= 0:
+ return ConfigurationError("Net.DialTimeout must be > 0")
+ case c.Net.ReadTimeout <= 0:
+ return ConfigurationError("Net.ReadTimeout must be > 0")
+ case c.Net.WriteTimeout <= 0:
+ return ConfigurationError("Net.WriteTimeout must be > 0")
+ case c.Net.KeepAlive < 0:
+ return ConfigurationError("Net.KeepAlive must be >= 0")
+ case c.Net.SASL.Enable == true && c.Net.SASL.User == "":
+ return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+ case c.Net.SASL.Enable == true && c.Net.SASL.Password == "":
+ return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+ }
+
+ // validate the Metadata values
+ switch {
+ case c.Metadata.Retry.Max < 0:
+ return ConfigurationError("Metadata.Retry.Max must be >= 0")
+ case c.Metadata.Retry.Backoff < 0:
+ return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+ case c.Metadata.RefreshFrequency < 0:
+ return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+ }
+
+ // validate the Producer values
+ switch {
+ case c.Producer.MaxMessageBytes <= 0:
+ return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+ case c.Producer.RequiredAcks < -1:
+ return ConfigurationError("Producer.RequiredAcks must be >= -1")
+ case c.Producer.Timeout <= 0:
+ return ConfigurationError("Producer.Timeout must be > 0")
+ case c.Producer.Partitioner == nil:
+ return ConfigurationError("Producer.Partitioner must not be nil")
+ case c.Producer.Flush.Bytes < 0:
+ return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+ case c.Producer.Flush.Messages < 0:
+ return ConfigurationError("Producer.Flush.Messages must be >= 0")
+ case c.Producer.Flush.Frequency < 0:
+ return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+ case c.Producer.Flush.MaxMessages < 0:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+ case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+ case c.Producer.Retry.Max < 0:
+ return ConfigurationError("Producer.Retry.Max must be >= 0")
+ case c.Producer.Retry.Backoff < 0:
+ return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+ }
+
+ if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
+ return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
+ }
+
+ // validate the Consumer values
+ switch {
+ case c.Consumer.Fetch.Min <= 0:
+ return ConfigurationError("Consumer.Fetch.Min must be > 0")
+ case c.Consumer.Fetch.Default <= 0:
+ return ConfigurationError("Consumer.Fetch.Default must be > 0")
+ case c.Consumer.Fetch.Max < 0:
+ return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+ case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+ return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+ case c.Consumer.MaxProcessingTime <= 0:
+ return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+ case c.Consumer.Retry.Backoff < 0:
+ return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+ case c.Consumer.Offsets.CommitInterval <= 0:
+ return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
+ case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+ return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+
+ }
+
+ // validate misc shared values
+ switch {
+ case c.ChannelBufferSize < 0:
+ return ConfigurationError("ChannelBufferSize must be >= 0")
+ case !validID.MatchString(c.ClientID):
+ return ConfigurationError("ClientID is invalid")
+ }
+
+ return nil
+}
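
For orientation (not part of this diff): a hedged sketch of building and validating a `Config`. `NewConfig` supplies the defaults documented above; the specific overrides and the client ID shown here are illustrative, not values used by this service.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// NewConfig applies the defaults documented above (3 metadata retries,
	// 250ms backoff, WaitForLocal acks, 256-element channel buffers, ...).
	cfg := sarama.NewConfig()

	// Override a few of the documented knobs. The client ID is an arbitrary
	// example value; set it to something specific to your application.
	cfg.ClientID = "example-app"
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Return.Successes = true
	cfg.Metadata.Retry.Max = 5
	cfg.Version = sarama.V0_10_1_0

	// Validate rejects nonsensical combinations (see the switch blocks above)
	// and only logs warnings for merely suspicious ones.
	if err := cfg.Validate(); err != nil {
		log.Fatalf("invalid sarama config: %s", err)
	}
}
```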
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
new file mode 100644
index 00000000..c82b994c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -0,0 +1,741 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+ Key, Value []byte
+ Topic string
+ Partition int32
+ Offset int64
+ Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
+ BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+ Topic string
+ Partition int32
+ Err error
+}
+
+func (ce ConsumerError) Error() string {
+ return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+ return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+//
+// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
+// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
+// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
+// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+type Consumer interface {
+
+ // Topics returns the set of available topics as retrieved from the cluster
+ // metadata. This method is the same as Client.Topics(), and is provided for
+ // convenience.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ // This method is the same as Client.Partitions(), and is provided for convenience.
+ Partitions(topic string) ([]int32, error)
+
+ // ConsumePartition creates a PartitionConsumer on the given topic/partition with
+ // the given offset. It will return an error if this Consumer is already consuming
+ // on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+ // or OffsetOldest
+ ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+ // HighWaterMarks returns the current high water marks for each topic and partition.
+ // Consistency between partitions is not guaranteed since high water marks are updated separately.
+ HighWaterMarks() map[string]map[int32]int64
+
+ // Close shuts down the consumer. It must be called after all child
+ // PartitionConsumers have already been closed.
+ Close() error
+}
+
+type consumer struct {
+ client Client
+ conf *Config
+ ownClient bool
+
+ lock sync.Mutex
+ children map[string]map[int32]*partitionConsumer
+ brokerConsumers map[*Broker]*brokerConsumer
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+ client, err := NewClient(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := NewConsumerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ c.(*consumer).ownClient = true
+ return c, nil
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ c := &consumer{
+ client: client,
+ conf: client.Config(),
+ children: make(map[string]map[int32]*partitionConsumer),
+ brokerConsumers: make(map[*Broker]*brokerConsumer),
+ }
+
+ return c, nil
+}
+
+func (c *consumer) Close() error {
+ if c.ownClient {
+ return c.client.Close()
+ }
+ return nil
+}
+
+func (c *consumer) Topics() ([]string, error) {
+ return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+ return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+ child := &partitionConsumer{
+ consumer: c,
+ conf: c.conf,
+ topic: topic,
+ partition: partition,
+ messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+ errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
+ feeder: make(chan *FetchResponse, 1),
+ trigger: make(chan none, 1),
+ dying: make(chan none),
+ fetchSize: c.conf.Consumer.Fetch.Default,
+ }
+
+ if err := child.chooseStartingOffset(offset); err != nil {
+ return nil, err
+ }
+
+ var leader *Broker
+ var err error
+ if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
+ return nil, err
+ }
+
+ if err := c.addChild(child); err != nil {
+ return nil, err
+ }
+
+ go withRecover(child.dispatcher)
+ go withRecover(child.responseFeeder)
+
+ child.broker = c.refBrokerConsumer(leader)
+ child.broker.input <- child
+
+ return child, nil
+}
+
+func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ hwms := make(map[string]map[int32]int64)
+ for topic, p := range c.children {
+ hwm := make(map[int32]int64, len(p))
+ for partition, pc := range p {
+ hwm[partition] = pc.HighWaterMarkOffset()
+ }
+ hwms[topic] = hwm
+ }
+
+ return hwms
+}
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ topicChildren := c.children[child.topic]
+ if topicChildren == nil {
+ topicChildren = make(map[int32]*partitionConsumer)
+ c.children[child.topic] = topicChildren
+ }
+
+ if topicChildren[child.partition] != nil {
+ return ConfigurationError("That topic/partition is already being consumed")
+ }
+
+ topicChildren[child.partition] = child
+ return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ bc := c.brokerConsumers[broker]
+ if bc == nil {
+ bc = c.newBrokerConsumer(broker)
+ c.brokerConsumers[broker] = bc
+ }
+
+ bc.refs++
+
+ return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ brokerWorker.refs--
+
+ if brokerWorker.refs == 0 {
+ close(brokerWorker.input)
+ if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+ delete(c.brokerConsumers, brokerWorker.broker)
+ }
+ }
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
+// or AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically
+// when it passes out of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+type PartitionConsumer interface {
+
+ // AsyncClose initiates a shutdown of the PartitionConsumer. This method will
+ // return immediately, after which you should wait until the 'messages' and
+ // 'errors' channel are drained. It is required to call this function, or
+ // Close before a consumer object passes out of scope, as it will otherwise
+ // leak memory. You must call this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionConsumer from fetching messages. It is required to
+ // call this function (or AsyncClose) before a consumer object passes out of
+ // scope, as it will otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+
+ // Messages returns the read channel for the messages that are returned by
+ // the broker.
+ Messages() <-chan *ConsumerMessage
+
+ // Errors returns a read channel of errors that occurred during consuming, if
+ // enabled. By default, errors are logged and not returned over this channel.
+ // If you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // HighWaterMarkOffset returns the high water mark offset of the partition,
+ // i.e. the offset that will be used for the next message that will be produced.
+ // You can use this to determine how far behind the processing is.
+ HighWaterMarkOffset() int64
+}
+
+type partitionConsumer struct {
+ highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ consumer *consumer
+ conf *Config
+ topic string
+ partition int32
+
+ broker *brokerConsumer
+ messages chan *ConsumerMessage
+ errors chan *ConsumerError
+ feeder chan *FetchResponse
+
+ trigger, dying chan none
+ responseResult error
+
+ fetchSize int32
+ offset int64
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+ cErr := &ConsumerError{
+ Topic: child.topic,
+ Partition: child.partition,
+ Err: err,
+ }
+
+ if child.conf.Consumer.Return.Errors {
+ child.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (child *partitionConsumer) dispatcher() {
+ for range child.trigger {
+ select {
+ case <-child.dying:
+ close(child.trigger)
+ case <-time.After(child.conf.Consumer.Retry.Backoff):
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ child.broker = nil
+ }
+
+ Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
+ if err := child.dispatch(); err != nil {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+ }
+
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ }
+ child.consumer.removeChild(child)
+ close(child.feeder)
+}
+
+func (child *partitionConsumer) dispatch() error {
+ if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+ return err
+ }
+
+ var leader *Broker
+ var err error
+ if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
+ return err
+ }
+
+ child.broker = child.consumer.refBrokerConsumer(leader)
+
+ child.broker.input <- child
+
+ return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+ newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+ if err != nil {
+ return err
+ }
+ oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case offset == OffsetNewest:
+ child.offset = newestOffset
+ case offset == OffsetOldest:
+ child.offset = oldestOffset
+ case offset >= oldestOffset && offset <= newestOffset:
+ child.offset = offset
+ default:
+ return ErrOffsetOutOfRange
+ }
+
+ return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+ return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+ return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+ // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+ // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
+ // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
+ // also just close itself)
+ close(child.dying)
+}
+
+func (child *partitionConsumer) Close() error {
+ child.AsyncClose()
+
+ go withRecover(func() {
+ for range child.messages {
+ // drain
+ }
+ })
+
+ var errors ConsumerErrors
+ for err := range child.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+ return atomic.LoadInt64(&child.highWaterMarkOffset)
+}
+
+func (child *partitionConsumer) responseFeeder() {
+ var msgs []*ConsumerMessage
+ expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime)
+ expireTimedOut := false
+
+feederLoop:
+ for response := range child.feeder {
+ msgs, child.responseResult = child.parseResponse(response)
+
+ for i, msg := range msgs {
+ if !expiryTimer.Stop() && !expireTimedOut {
+ // expiryTimer was expired; clear out the waiting msg
+ <-expiryTimer.C
+ }
+ expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
+ expireTimedOut = false
+
+ select {
+ case child.messages <- msg:
+ case <-expiryTimer.C:
+ expireTimedOut = true
+ child.responseResult = errTimedOut
+ child.broker.acks.Done()
+ for _, msg = range msgs[i:] {
+ child.messages <- msg
+ }
+ child.broker.input <- child
+ continue feederLoop
+ }
+ }
+
+ child.broker.acks.Done()
+ }
+
+ close(child.messages)
+ close(child.errors)
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+ block := response.GetBlock(child.topic, child.partition)
+ if block == nil {
+ return nil, ErrIncompleteResponse
+ }
+
+ if block.Err != ErrNoError {
+ return nil, block.Err
+ }
+
+ if len(block.MsgSet.Messages) == 0 {
+ // We got no messages. If we got a trailing one then we need to ask for more data.
+ // Otherwise we just poll again and wait for one to be produced...
+ if block.MsgSet.PartialTrailingMessage {
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+ // we can't ask for more data, we've hit the configured limit
+ child.sendError(ErrMessageTooLarge)
+ child.offset++ // skip this one so we can keep processing future messages
+ } else {
+ child.fetchSize *= 2
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+ child.fetchSize = child.conf.Consumer.Fetch.Max
+ }
+ }
+ }
+
+ return nil, nil
+ }
+
+ // we got messages, reset our fetch size in case it was increased for a previous request
+ child.fetchSize = child.conf.Consumer.Fetch.Default
+ atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
+
+ incomplete := false
+ prelude := true
+ var messages []*ConsumerMessage
+ for _, msgBlock := range block.MsgSet.Messages {
+
+ for _, msg := range msgBlock.Messages() {
+ offset := msg.Offset
+ if msg.Msg.Version >= 1 {
+ baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
+ offset += baseOffset
+ }
+ if prelude && offset < child.offset {
+ continue
+ }
+ prelude = false
+
+ if offset >= child.offset {
+ messages = append(messages, &ConsumerMessage{
+ Topic: child.topic,
+ Partition: child.partition,
+ Key: msg.Msg.Key,
+ Value: msg.Msg.Value,
+ Offset: offset,
+ Timestamp: msg.Msg.Timestamp,
+ BlockTimestamp: msgBlock.Msg.Timestamp,
+ })
+ child.offset = offset + 1
+ } else {
+ incomplete = true
+ }
+ }
+
+ }
+
+ if incomplete || len(messages) == 0 {
+ return nil, ErrIncompleteResponse
+ }
+ return messages, nil
+}
+
+// brokerConsumer
+
+type brokerConsumer struct {
+ consumer *consumer
+ broker *Broker
+ input chan *partitionConsumer
+ newSubscriptions chan []*partitionConsumer
+ wait chan none
+ subscriptions map[*partitionConsumer]none
+ acks sync.WaitGroup
+ refs int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+ bc := &brokerConsumer{
+ consumer: c,
+ broker: broker,
+ input: make(chan *partitionConsumer),
+ newSubscriptions: make(chan []*partitionConsumer),
+ wait: make(chan none),
+ subscriptions: make(map[*partitionConsumer]none),
+ refs: 0,
+ }
+
+ go withRecover(bc.subscriptionManager)
+ go withRecover(bc.subscriptionConsumer)
+
+ return bc
+}
+
+func (bc *brokerConsumer) subscriptionManager() {
+ var buffer []*partitionConsumer
+
+ // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+ // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks
+ // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+	// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+ // so the main goroutine can block waiting for work if it has none.
+ for {
+ if len(buffer) > 0 {
+ select {
+ case event, ok := <-bc.input:
+ if !ok {
+ goto done
+ }
+ buffer = append(buffer, event)
+ case bc.newSubscriptions <- buffer:
+ buffer = nil
+ case bc.wait <- none{}:
+ }
+ } else {
+ select {
+ case event, ok := <-bc.input:
+ if !ok {
+ goto done
+ }
+ buffer = append(buffer, event)
+ case bc.newSubscriptions <- nil:
+ }
+ }
+ }
+
+done:
+ close(bc.wait)
+ if len(buffer) > 0 {
+ bc.newSubscriptions <- buffer
+ }
+ close(bc.newSubscriptions)
+}
+
+func (bc *brokerConsumer) subscriptionConsumer() {
+ <-bc.wait // wait for our first piece of work
+
+	// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
+ for newSubscriptions := range bc.newSubscriptions {
+ bc.updateSubscriptions(newSubscriptions)
+
+ if len(bc.subscriptions) == 0 {
+ // We're about to be shut down or we're about to receive more subscriptions.
+ // Either way, the signal just hasn't propagated to our goroutine yet.
+ <-bc.wait
+ continue
+ }
+
+ response, err := bc.fetchNewMessages()
+
+ if err != nil {
+ Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+ bc.abort(err)
+ return
+ }
+
+ bc.acks.Add(len(bc.subscriptions))
+ for child := range bc.subscriptions {
+ child.feeder <- response
+ }
+ bc.acks.Wait()
+ bc.handleResponses()
+ }
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+ for _, child := range newSubscriptions {
+ bc.subscriptions[child] = none{}
+ Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ }
+
+ for child := range bc.subscriptions {
+ select {
+ case <-child.dying:
+ Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ default:
+ break
+ }
+ }
+}
+
+func (bc *brokerConsumer) handleResponses() {
+ // handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+ for child := range bc.subscriptions {
+ result := child.responseResult
+ child.responseResult = nil
+
+ switch result {
+ case nil:
+ break
+ case errTimedOut:
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+ bc.broker.ID(), child.topic, child.partition)
+ delete(bc.subscriptions, child)
+ case ErrOffsetOutOfRange:
+			// there's no point in retrying this; it will just fail the same way again,
+ // shut it down and force the user to choose what to do
+ child.sendError(result)
+ Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
+ // not an error, but does need redispatching
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ default:
+ // dunno, tell the user and try redispatching
+ child.sendError(result)
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ }
+ }
+}
+
+func (bc *brokerConsumer) abort(err error) {
+ bc.consumer.abandonBrokerConsumer(bc)
+ _ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+ for child := range bc.subscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+
+ for newSubscriptions := range bc.newSubscriptions {
+ if len(newSubscriptions) == 0 {
+ <-bc.wait
+ continue
+ }
+ for _, child := range newSubscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+}
+
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+ request := &FetchRequest{
+ MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
+ MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+ }
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+ request.Version = 2
+ }
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+ request.Version = 3
+ request.MaxBytes = MaxResponseSize
+ }
+
+ for child := range bc.subscriptions {
+ request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
+ }
+
+ return bc.broker.Fetch(request)
+}
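
For orientation (not part of this diff): a minimal sketch of the consumption pattern the `Consumer`/`PartitionConsumer` comments above describe - one partition, messages read from `Messages()`, errors surfaced on `Errors()` when `Consumer.Return.Errors` is enabled. Broker address, topic, and partition are placeholders.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Surface consumer errors on the Errors() channel instead of only logging them.
	cfg.Consumer.Return.Errors = true

	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Consume a single partition from the oldest retained offset.
	pc, err := consumer.ConsumePartition("some-topic", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	for {
		select {
		case msg := <-pc.Messages():
			log.Printf("offset %d: %s", msg.Offset, msg.Value)
		case consumerErr := <-pc.Errors():
			log.Printf("consume error: %s", consumerErr)
		}
	}
}
```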
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
new file mode 100644
index 00000000..9d92d350
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go
@@ -0,0 +1,94 @@
+package sarama
+
+type ConsumerGroupMemberMetadata struct {
+ Version int16
+ Topics []string
+ UserData []byte
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+ pe.putInt16(m.Version)
+
+ if err := pe.putStringArray(m.Topics); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(m.UserData); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+ if m.Version, err = pd.getInt16(); err != nil {
+ return
+ }
+
+ if m.Topics, err = pd.getStringArray(); err != nil {
+ return
+ }
+
+ if m.UserData, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+type ConsumerGroupMemberAssignment struct {
+ Version int16
+ Topics map[string][]int32
+ UserData []byte
+}
+
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+ pe.putInt16(m.Version)
+
+ if err := pe.putArrayLength(len(m.Topics)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range m.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+
+ if err := pe.putBytes(m.UserData); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+ if m.Version, err = pd.getInt16(); err != nil {
+ return
+ }
+
+ var topicLen int
+ if topicLen, err = pd.getArrayLength(); err != nil {
+ return
+ }
+
+ m.Topics = make(map[string][]int32, topicLen)
+ for i := 0; i < topicLen; i++ {
+ var topic string
+ if topic, err = pd.getString(); err != nil {
+ return
+ }
+ if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+ return
+ }
+ }
+
+ if m.UserData, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
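
For orientation (not part of this diff): the two structs above describe what a group member advertises when joining a group and what the group leader assigns back. A small sketch of the value shapes follows; the topic name and partition IDs are placeholders, and encoding to the wire format happens inside the vendored request handling.

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// What a member advertises when joining: the topics it can consume.
	meta := sarama.ConsumerGroupMemberMetadata{
		Version: 0,
		Topics:  []string{"some-topic"},
	}

	// What the leader hands back: a topic -> partitions assignment.
	assignment := sarama.ConsumerGroupMemberAssignment{
		Version: 0,
		Topics:  map[string][]int32{"some-topic": {0, 1, 2}},
	}

	fmt.Println(meta.Topics, assignment.Topics)
}
```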
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
new file mode 100644
index 00000000..483be335
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -0,0 +1,26 @@
+package sarama
+
+type ConsumerMetadataRequest struct {
+ ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+ return pe.putString(r.ConsumerGroup)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.ConsumerGroup, err = pd.getString()
+ return err
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+ return 10
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+ return 0
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+ return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
new file mode 100644
index 00000000..6b9632bb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -0,0 +1,85 @@
+package sarama
+
+import (
+ "net"
+ "strconv"
+)
+
+type ConsumerMetadataResponse struct {
+ Err KError
+ Coordinator *Broker
+ CoordinatorID int32 // deprecated: use Coordinator.ID()
+ CoordinatorHost string // deprecated: use Coordinator.Addr()
+ CoordinatorPort int32 // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(tmp)
+
+ coordinator := new(Broker)
+ if err := coordinator.decode(pd); err != nil {
+ return err
+ }
+ if coordinator.addr == ":0" {
+ return nil
+ }
+ r.Coordinator = coordinator
+
+ // this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+ // backwards compatibility
+ host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+ if err != nil {
+ return err
+ }
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+ r.CoordinatorID = r.Coordinator.ID()
+ r.CoordinatorHost = host
+ r.CoordinatorPort = int32(port)
+
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ if r.Coordinator != nil {
+ host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+ if err != nil {
+ return err
+ }
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+ pe.putInt32(r.Coordinator.ID())
+ if err := pe.putString(host); err != nil {
+ return err
+ }
+ pe.putInt32(int32(port))
+ return nil
+ }
+ pe.putInt32(r.CoordinatorID)
+ if err := pe.putString(r.CoordinatorHost); err != nil {
+ return err
+ }
+ pe.putInt32(r.CoordinatorPort)
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+ return 10
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+ return 0
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+ return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
new file mode 100644
index 00000000..f4fde18a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -0,0 +1,35 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "hash/crc32"
+)
+
+// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
+type crc32Field struct {
+ startOffset int
+}
+
+func (c *crc32Field) saveOffset(in int) {
+ c.startOffset = in
+}
+
+func (c *crc32Field) reserveLength() int {
+ return 4
+}
+
+func (c *crc32Field) run(curOffset int, buf []byte) error {
+ crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
+ binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
+ return nil
+}
+
+func (c *crc32Field) check(curOffset int, buf []byte) error {
+ crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
+
+ if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
+ return PacketDecodingError{"CRC didn't match"}
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
new file mode 100644
index 00000000..1fb35677
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go
@@ -0,0 +1,30 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+ Groups []string
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+ return pe.putStringArray(r.Groups)
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Groups, err = pd.getStringArray()
+ return
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+ return 15
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+ return 0
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+ r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
new file mode 100644
index 00000000..542b3a97
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go
@@ -0,0 +1,187 @@
+package sarama
+
+type DescribeGroupsResponse struct {
+ Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+
+ for _, groupDescription := range r.Groups {
+ if err := groupDescription.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Groups = make([]*GroupDescription, n)
+ for i := 0; i < n; i++ {
+ r.Groups[i] = new(GroupDescription)
+ if err := r.Groups[i].decode(pd); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeGroupsResponse) key() int16 {
+ return 15
+}
+
+func (r *DescribeGroupsResponse) version() int16 {
+ return 0
+}
+
+func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+type GroupDescription struct {
+ Err KError
+ GroupId string
+ State string
+ ProtocolType string
+ Protocol string
+ Members map[string]*GroupMemberDescription
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder) error {
+ pe.putInt16(int16(gd.Err))
+
+ if err := pe.putString(gd.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.State); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.ProtocolType); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.Protocol); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(gd.Members)); err != nil {
+ return err
+ }
+
+ for memberId, groupMemberDescription := range gd.Members {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+ if err := groupMemberDescription.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ gd.Err = KError(kerr)
+
+ if gd.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.State, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.Protocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ gd.Members = make(map[string]*GroupMemberDescription)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ gd.Members[memberId] = new(GroupMemberDescription)
+ if err := gd.Members[memberId].decode(pd); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type GroupMemberDescription struct {
+ ClientId string
+ ClientHost string
+ MemberMetadata []byte
+ MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
+ if err := pe.putString(gmd.ClientId); err != nil {
+ return err
+ }
+ if err := pe.putString(gmd.ClientHost); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
+ if gmd.ClientId, err = pd.getString(); err != nil {
+ return
+ }
+ if gmd.ClientHost, err = pd.getString(); err != nil {
+ return
+ }
+ if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+ return
+ }
+ if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+ assignment := new(ConsumerGroupMemberAssignment)
+ err := decode(gmd.MemberAssignment, assignment)
+ return assignment, err
+}
+
+func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
+ metadata := new(ConsumerGroupMemberMetadata)
+ err := decode(gmd.MemberMetadata, metadata)
+ return metadata, err
+}
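
The `GetMemberAssignment` and `GetMemberMetadata` helpers above decode the raw byte fields carried in a DescribeGroups response into the structs defined in `consumer_group_members.go`. A minimal sketch of how a caller might walk such a response, assuming it has already been obtained from a broker (the response value here is hand-built purely for illustration):

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func printGroupMembers(resp *sarama.DescribeGroupsResponse) {
	for _, group := range resp.Groups {
		fmt.Printf("group %s (state %s)\n", group.GroupId, group.State)
		for memberID, member := range group.Members {
			// MemberAssignment is an opaque []byte on the wire; decode it
			// into topic -> partitions using the helper defined above.
			assignment, err := member.GetMemberAssignment()
			if err != nil {
				fmt.Printf("  %s: cannot decode assignment: %v\n", memberID, err)
				continue
			}
			fmt.Printf("  %s on %s -> %v\n", memberID, member.ClientHost, assignment.Topics)
		}
	}
}

func main() {
	// Hypothetical, hand-built response used only to exercise the helper.
	resp := &sarama.DescribeGroupsResponse{
		Groups: []*sarama.GroupDescription{
			{GroupId: "example-group", State: "Stable"},
		},
	}
	printGroupMembers(resp)
}
```
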
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
new file mode 100644
index 00000000..adcf9421
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -0,0 +1,14 @@
+name: sarama
+
+up:
+ - go:
+ version: '1.8'
+
+commands:
+ test:
+ run: make test
+ desc: 'run unit tests'
+
+packages:
+ - git@github.com:Shopify/dev-shopify.git
+
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
new file mode 100644
index 00000000..7ce3bc0f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go
@@ -0,0 +1,89 @@
+package sarama
+
+import (
+ "fmt"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Encoder is the interface that wraps the basic Encode method.
+// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+ encode(pe packetEncoder) error
+}
+
+// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
+ if e == nil {
+ return nil, nil
+ }
+
+ var prepEnc prepEncoder
+ var realEnc realEncoder
+
+ err := e.encode(&prepEnc)
+ if err != nil {
+ return nil, err
+ }
+
+ if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+ return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+ }
+
+ realEnc.raw = make([]byte, prepEnc.length)
+ realEnc.registry = metricRegistry
+ err = e.encode(&realEnc)
+ if err != nil {
+ return nil, err
+ }
+
+ return realEnc.raw, nil
+}
+
+// Decoder is the interface that wraps the basic Decode method.
+// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+ decode(pd packetDecoder) error
+}
+
+type versionedDecoder interface {
+ decode(pd packetDecoder, version int16) error
+}
+
+// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder) error {
+ if buf == nil {
+ return nil
+ }
+
+ helper := realDecoder{raw: buf}
+ err := in.decode(&helper)
+ if err != nil {
+ return err
+ }
+
+ if helper.off != len(buf) {
+ return PacketDecodingError{"invalid length"}
+ }
+
+ return nil
+}
+
+func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
+ if buf == nil {
+ return nil
+ }
+
+ helper := realDecoder{raw: buf}
+ err := in.decode(&helper, version)
+ if err != nil {
+ return err
+ }
+
+ if helper.off != len(buf) {
+ return PacketDecodingError{"invalid length"}
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
new file mode 100644
index 00000000..e6800ed4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/errors.go
@@ -0,0 +1,221 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+ Info string
+}
+
+func (err PacketEncodingError) Error() string {
+ return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+ Info string
+}
+
+func (err PacketDecodingError) Error() string {
+ return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+ return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// Numeric error codes returned by the Kafka server.
+const (
+ ErrNoError KError = 0
+ ErrUnknown KError = -1
+ ErrOffsetOutOfRange KError = 1
+ ErrInvalidMessage KError = 2
+ ErrUnknownTopicOrPartition KError = 3
+ ErrInvalidMessageSize KError = 4
+ ErrLeaderNotAvailable KError = 5
+ ErrNotLeaderForPartition KError = 6
+ ErrRequestTimedOut KError = 7
+ ErrBrokerNotAvailable KError = 8
+ ErrReplicaNotAvailable KError = 9
+ ErrMessageSizeTooLarge KError = 10
+ ErrStaleControllerEpochCode KError = 11
+ ErrOffsetMetadataTooLarge KError = 12
+ ErrNetworkException KError = 13
+ ErrOffsetsLoadInProgress KError = 14
+ ErrConsumerCoordinatorNotAvailable KError = 15
+ ErrNotCoordinatorForConsumer KError = 16
+ ErrInvalidTopic KError = 17
+ ErrMessageSetSizeTooLarge KError = 18
+ ErrNotEnoughReplicas KError = 19
+ ErrNotEnoughReplicasAfterAppend KError = 20
+ ErrInvalidRequiredAcks KError = 21
+ ErrIllegalGeneration KError = 22
+ ErrInconsistentGroupProtocol KError = 23
+ ErrInvalidGroupId KError = 24
+ ErrUnknownMemberId KError = 25
+ ErrInvalidSessionTimeout KError = 26
+ ErrRebalanceInProgress KError = 27
+ ErrInvalidCommitOffsetSize KError = 28
+ ErrTopicAuthorizationFailed KError = 29
+ ErrGroupAuthorizationFailed KError = 30
+ ErrClusterAuthorizationFailed KError = 31
+ ErrInvalidTimestamp KError = 32
+ ErrUnsupportedSASLMechanism KError = 33
+ ErrIllegalSASLState KError = 34
+ ErrUnsupportedVersion KError = 35
+ ErrTopicAlreadyExists KError = 36
+ ErrInvalidPartitions KError = 37
+ ErrInvalidReplicationFactor KError = 38
+ ErrInvalidReplicaAssignment KError = 39
+ ErrInvalidConfig KError = 40
+ ErrNotController KError = 41
+ ErrInvalidRequest KError = 42
+ ErrUnsupportedForMessageFormat KError = 43
+ ErrPolicyViolation KError = 44
+)
+
+func (err KError) Error() string {
+ // Error messages stolen/adapted from
+ // https://kafka.apache.org/protocol#protocol_error_codes
+ switch err {
+ case ErrNoError:
+ return "kafka server: Not an error, why are you printing me?"
+ case ErrUnknown:
+ return "kafka server: Unexpected (unknown?) server error."
+ case ErrOffsetOutOfRange:
+ return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
+ case ErrInvalidMessage:
+ return "kafka server: Message contents does not match its CRC."
+ case ErrUnknownTopicOrPartition:
+ return "kafka server: Request was for a topic or partition that does not exist on this broker."
+ case ErrInvalidMessageSize:
+ return "kafka server: The message has a negative size."
+ case ErrLeaderNotAvailable:
+ return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
+ case ErrNotLeaderForPartition:
+ return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
+ case ErrRequestTimedOut:
+ return "kafka server: Request exceeded the user-specified time limit in the request."
+ case ErrBrokerNotAvailable:
+ return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+ case ErrReplicaNotAvailable:
+ return "kafka server: Replica information not available, one or more brokers are down."
+ case ErrMessageSizeTooLarge:
+ return "kafka server: Message was too large, server rejected it to avoid allocation error."
+ case ErrStaleControllerEpochCode:
+ return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
+ case ErrOffsetMetadataTooLarge:
+ return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+ case ErrNetworkException:
+ return "kafka server: The server disconnected before a response was received."
+ case ErrOffsetsLoadInProgress:
+ return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+ case ErrConsumerCoordinatorNotAvailable:
+ return "kafka server: Offset's topic has not yet been created."
+ case ErrNotCoordinatorForConsumer:
+ return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+ case ErrInvalidTopic:
+ return "kafka server: The request attempted to perform an operation on an invalid topic."
+ case ErrMessageSetSizeTooLarge:
+ return "kafka server: The request included message batch larger than the configured segment size on the server."
+ case ErrNotEnoughReplicas:
+ return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+ case ErrNotEnoughReplicasAfterAppend:
+ return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+ case ErrInvalidRequiredAcks:
+ return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+ case ErrIllegalGeneration:
+ return "kafka server: The provided generation id is not the current generation."
+ case ErrInconsistentGroupProtocol:
+ return "kafka server: The provided group protocol type is incompatible with the other members."
+ case ErrInvalidGroupId:
+ return "kafka server: The provided group id was empty."
+ case ErrUnknownMemberId:
+ return "kafka server: The provided member is not known in the current generation."
+ case ErrInvalidSessionTimeout:
+ return "kafka server: The provided session timeout is outside the allowed range."
+ case ErrRebalanceInProgress:
+ return "kafka server: A rebalance for the group is in progress. Please re-join the group."
+ case ErrInvalidCommitOffsetSize:
+ return "kafka server: The provided commit metadata was too large."
+ case ErrTopicAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this topic."
+ case ErrGroupAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this group."
+ case ErrClusterAuthorizationFailed:
+ return "kafka server: The client is not authorized to send this request type."
+ case ErrInvalidTimestamp:
+ return "kafka server: The timestamp of the message is out of acceptable range."
+ case ErrUnsupportedSASLMechanism:
+ return "kafka server: The broker does not support the requested SASL mechanism."
+ case ErrIllegalSASLState:
+ return "kafka server: Request is not valid given the current SASL state."
+ case ErrUnsupportedVersion:
+ return "kafka server: The version of API is not supported."
+ case ErrTopicAlreadyExists:
+ return "kafka server: Topic with this name already exists."
+ case ErrInvalidPartitions:
+ return "kafka server: Number of partitions is invalid."
+ case ErrInvalidReplicationFactor:
+ return "kafka server: Replication-factor is invalid."
+ case ErrInvalidReplicaAssignment:
+ return "kafka server: Replica assignment is invalid."
+ case ErrInvalidConfig:
+ return "kafka server: Configuration is invalid."
+ case ErrNotController:
+ return "kafka server: This is not the correct controller for this cluster."
+ case ErrInvalidRequest:
+ return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
+ case ErrUnsupportedForMessageFormat:
+ return "kafka server: The requested operation is not supported by the message format version."
+ case ErrPolicyViolation:
+ return "kafka server: Request parameters do not satisfy the configured policy."
+ }
+
+ return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
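
Because `KError` implements the `error` interface, the broker-level error codes above can be inspected directly from response `Err` fields. A small sketch of branching on them; the grouping of retryable codes is an illustrative assumption, not sarama's own policy:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// handleKafkaError sketches how a caller might branch on the broker error
// codes defined above.
func handleKafkaError(kerr sarama.KError) {
	switch kerr {
	case sarama.ErrNoError:
		fmt.Println("request succeeded")
	case sarama.ErrNotLeaderForPartition, sarama.ErrLeaderNotAvailable:
		// Transient leadership errors: refresh metadata and retry.
		fmt.Println("retryable:", kerr)
	case sarama.ErrOffsetOutOfRange:
		fmt.Println("reset the consumer offset:", kerr)
	default:
		fmt.Println("unhandled broker error:", kerr)
	}
}

func main() {
	handleKafkaError(sarama.ErrNotLeaderForPartition)
}
```
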
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
new file mode 100644
index 00000000..65600e86
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -0,0 +1,150 @@
+package sarama
+
+type fetchRequestBlock struct {
+ fetchOffset int64
+ maxBytes int32
+}
+
+func (b *fetchRequestBlock) encode(pe packetEncoder) error {
+ pe.putInt64(b.fetchOffset)
+ pe.putInt32(b.maxBytes)
+ return nil
+}
+
+func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
+ if b.fetchOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if b.maxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
+type FetchRequest struct {
+ MaxWaitTime int32
+ MinBytes int32
+ MaxBytes int32
+ Version int16
+ blocks map[string]map[int32]*fetchRequestBlock
+}
+
+func (r *FetchRequest) encode(pe packetEncoder) (err error) {
+ pe.putInt32(-1) // replica ID is always -1 for clients
+ pe.putInt32(r.MaxWaitTime)
+ pe.putInt32(r.MinBytes)
+ if r.Version == 3 {
+ pe.putInt32(r.MaxBytes)
+ }
+ err = pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, blocks := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(blocks))
+ if err != nil {
+ return err
+ }
+ for partition, block := range blocks {
+ pe.putInt32(partition)
+ err = block.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if _, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.MaxWaitTime, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.MinBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.Version == 3 {
+ if r.MaxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ fetchBlock := &fetchRequestBlock{}
+ if err = fetchBlock.decode(pd); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = fetchBlock
+ }
+ }
+ return nil
+}
+
+func (r *FetchRequest) key() int16 {
+ return 1
+}
+
+func (r *FetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *FetchRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ case 3:
+ return V0_10_1_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ }
+
+ tmp := new(fetchRequestBlock)
+ tmp.maxBytes = maxBytes
+ tmp.fetchOffset = fetchOffset
+
+ r.blocks[topic][partitionID] = tmp
+}
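
The `buildFetchRequest` hunk earlier in this diff picks the request version from the configured Kafka version and then adds one block per subscribed partition. A stripped-down sketch of the same construction, with the topic name, offset and byte limits chosen arbitrarily for illustration:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Assemble a fetch request by hand; real consumers let sarama do this.
	req := &sarama.FetchRequest{
		MinBytes:    1,
		MaxWaitTime: 250, // milliseconds, as an int32 like the wire format
	}

	// Version 3 (Kafka 0.10.1+) adds a request-level MaxBytes cap.
	req.Version = 3
	req.MaxBytes = sarama.MaxResponseSize

	// One block per topic/partition: start at offset 0, cap at 32 KiB.
	req.AddBlock("example-topic", 0, 0, 32*1024)

	fmt.Printf("fetch request version %d\n", req.Version)
}
```
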
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
new file mode 100644
index 00000000..b56b166c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_response.go
@@ -0,0 +1,210 @@
+package sarama
+
+import "time"
+
+type FetchResponseBlock struct {
+ Err KError
+ HighWaterMarkOffset int64
+ MsgSet MessageSet
+}
+
+func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ b.HighWaterMarkOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ msgSetSize, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ msgSetDecoder, err := pd.getSubset(int(msgSetSize))
+ if err != nil {
+ return err
+ }
+ err = (&b.MsgSet).decode(msgSetDecoder)
+
+ return err
+}
+
+func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(b.Err))
+
+ pe.putInt64(b.HighWaterMarkOffset)
+
+ pe.push(&lengthField{})
+ err = b.MsgSet.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+type FetchResponse struct {
+ Blocks map[string]map[int32]*FetchResponseBlock
+ ThrottleTime time.Duration
+ Version int16 // v1 requires 0.9+, v2 requires 0.10+
+}
+
+func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.Version >= 1 {
+ throttle, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.ThrottleTime = time.Duration(throttle) * time.Millisecond
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(FetchResponseBlock)
+ err = block.decode(pd)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *FetchResponse) encode(pe packetEncoder) (err error) {
+ if r.Version >= 1 {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+ }
+
+ err = pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+
+ for id, block := range partitions {
+ pe.putInt32(id)
+ err = block.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ }
+ return nil
+}
+
+func (r *FetchResponse) key() int16 {
+ return 1
+}
+
+func (r *FetchResponse) version() int16 {
+ return r.Version
+}
+
+func (r *FetchResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := r.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+ frb.Err = err
+}
+
+func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := r.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+ var kb []byte
+ var vb []byte
+ if key != nil {
+ kb, _ = key.Encode()
+ }
+ if value != nil {
+ vb, _ = value.Encode()
+ }
+ msg := &Message{Key: kb, Value: vb}
+ msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+ frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
+}
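
`AddError` and `AddMessage` above exist mainly so tests can fabricate broker responses. A hedged sketch of using them together with `GetBlock`, with the topic name and payload invented for the example:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	resp := &sarama.FetchResponse{Version: 1}

	// Stub one successful message and one partition-level error.
	resp.AddMessage("example-topic", 0, nil, sarama.StringEncoder("hello"), 42)
	resp.AddError("example-topic", 1, sarama.ErrOffsetOutOfRange)

	// GetBlock returns nil if the topic/partition was never added.
	if block := resp.GetBlock("example-topic", 0); block != nil {
		for _, mb := range block.MsgSet.Messages {
			fmt.Printf("offset %d: %s\n", mb.Offset, string(mb.Msg.Value))
		}
	}
}
```
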
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
new file mode 100644
index 00000000..ce49c473
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go
@@ -0,0 +1,47 @@
+package sarama
+
+type HeartbeatRequest struct {
+ GroupId string
+ GenerationId int32
+ MemberId string
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (r *HeartbeatRequest) key() int16 {
+ return 12
+}
+
+func (r *HeartbeatRequest) version() int16 {
+ return 0
+}
+
+func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
new file mode 100644
index 00000000..766f5fde
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type HeartbeatResponse struct {
+ Err KError
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(kerr)
+
+ return nil
+}
+
+func (r *HeartbeatResponse) key() int16 {
+ return 12
+}
+
+func (r *HeartbeatResponse) version() int16 {
+ return 0
+}
+
+func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
new file mode 100644
index 00000000..3a7ba171
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_request.go
@@ -0,0 +1,143 @@
+package sarama
+
+type GroupProtocol struct {
+ Name string
+ Metadata []byte
+}
+
+func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
+ p.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+ p.Metadata, err = pd.getBytes()
+ return err
+}
+
+func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
+ if err := pe.putString(p.Name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(p.Metadata); err != nil {
+ return err
+ }
+ return nil
+}
+
+type JoinGroupRequest struct {
+ GroupId string
+ SessionTimeout int32
+ MemberId string
+ ProtocolType string
+ GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
+ OrderedGroupProtocols []*GroupProtocol
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ pe.putInt32(r.SessionTimeout)
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.ProtocolType); err != nil {
+ return err
+ }
+
+ if len(r.GroupProtocols) > 0 {
+ if len(r.OrderedGroupProtocols) > 0 {
+ return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
+ }
+
+ if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+ return err
+ }
+ for name, metadata := range r.GroupProtocols {
+ if err := pe.putString(name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(metadata); err != nil {
+ return err
+ }
+ }
+ } else {
+ if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
+ return err
+ }
+ for _, protocol := range r.OrderedGroupProtocols {
+ if err := protocol.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.SessionTimeout, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupProtocols = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ protocol := &GroupProtocol{}
+ if err := protocol.decode(pd); err != nil {
+ return err
+ }
+ r.GroupProtocols[protocol.Name] = protocol.Metadata
+ r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
+ }
+
+ return nil
+}
+
+func (r *JoinGroupRequest) key() int16 {
+ return 11
+}
+
+func (r *JoinGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+ r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
+ Name: name,
+ Metadata: metadata,
+ })
+}
+
+func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
+ bin, err := encode(metadata, nil)
+ if err != nil {
+ return err
+ }
+
+ r.AddGroupProtocol(name, bin)
+ return nil
+}
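
`AddGroupProtocolMetadata` ties `JoinGroupRequest` to the `ConsumerGroupMemberMetadata` type defined earlier: the metadata is encoded to bytes and appended as an ordered group protocol. A brief sketch, with the group name, topic and timeout made up for the example:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.JoinGroupRequest{
		GroupId:        "example-group",
		SessionTimeout: 30000, // milliseconds
		ProtocolType:   "consumer",
	}

	// The subscription is serialised with the same wire encoding used above.
	meta := &sarama.ConsumerGroupMemberMetadata{
		Version: 1,
		Topics:  []string{"example-topic"},
	}
	if err := req.AddGroupProtocolMetadata("range", meta); err != nil {
		panic(err)
	}

	fmt.Printf("join request carries %d protocol(s)\n", len(req.OrderedGroupProtocols))
}
```
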
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
new file mode 100644
index 00000000..6d35fe36
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_response.go
@@ -0,0 +1,115 @@
+package sarama
+
+type JoinGroupResponse struct {
+ Err KError
+ GenerationId int32
+ GroupProtocol string
+ LeaderId string
+ MemberId string
+ Members map[string][]byte
+}
+
+func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
+ members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
+ for id, bin := range r.Members {
+ meta := new(ConsumerGroupMemberMetadata)
+ if err := decode(bin, meta); err != nil {
+ return nil, err
+ }
+ members[id] = *meta
+ }
+ return members, nil
+}
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.GroupProtocol); err != nil {
+ return err
+ }
+ if err := pe.putString(r.LeaderId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Members)); err != nil {
+ return err
+ }
+
+ for memberId, memberMetadata := range r.Members {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(memberMetadata); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if r.GroupProtocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.LeaderId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.Members = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ memberMetadata, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.Members[memberId] = memberMetadata
+ }
+
+ return nil
+}
+
+func (r *JoinGroupResponse) key() int16 {
+ return 11
+}
+
+func (r *JoinGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
new file mode 100644
index 00000000..e1774274
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_request.go
@@ -0,0 +1,40 @@
+package sarama
+
+type LeaveGroupRequest struct {
+ GroupId string
+ MemberId string
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+ return 13
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
new file mode 100644
index 00000000..d60c626d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type LeaveGroupResponse struct {
+ Err KError
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(kerr)
+
+ return nil
+}
+
+func (r *LeaveGroupResponse) key() int16 {
+ return 13
+}
+
+func (r *LeaveGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go
new file mode 100644
index 00000000..70078be5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/length_field.go
@@ -0,0 +1,29 @@
+package sarama
+
+import "encoding/binary"
+
+// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
+type lengthField struct {
+ startOffset int
+}
+
+func (l *lengthField) saveOffset(in int) {
+ l.startOffset = in
+}
+
+func (l *lengthField) reserveLength() int {
+ return 4
+}
+
+func (l *lengthField) run(curOffset int, buf []byte) error {
+ binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
+ return nil
+}
+
+func (l *lengthField) check(curOffset int, buf []byte) error {
+ if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
+ return PacketDecodingError{"length field invalid"}
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
new file mode 100644
index 00000000..3b16abf7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ListGroupsRequest struct {
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+ return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ return nil
+}
+
+func (r *ListGroupsRequest) key() int16 {
+ return 16
+}
+
+func (r *ListGroupsRequest) version() int16 {
+ return 0
+}
+
+func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
new file mode 100644
index 00000000..56115d4c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_response.go
@@ -0,0 +1,69 @@
+package sarama
+
+type ListGroupsResponse struct {
+ Err KError
+ Groups map[string]string
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+ for groupId, protocolType := range r.Groups {
+ if err := pe.putString(groupId); err != nil {
+ return err
+ }
+ if err := pe.putString(protocolType); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.Groups = make(map[string]string)
+ for i := 0; i < n; i++ {
+ groupId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ protocolType, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ r.Groups[groupId] = protocolType
+ }
+
+ return nil
+}
+
+func (r *ListGroupsResponse) key() int16 {
+ return 16
+}
+
+func (r *ListGroupsResponse) version() int16 {
+ return 0
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
new file mode 100644
index 00000000..86b4ac32
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -0,0 +1,212 @@
+package sarama
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "time"
+
+ "github.com/eapache/go-xerial-snappy"
+ "github.com/pierrec/lz4"
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+// only the last two bits are really used
+const compressionCodecMask int8 = 0x03
+
+const (
+ CompressionNone CompressionCodec = 0
+ CompressionGZIP CompressionCodec = 1
+ CompressionSnappy CompressionCodec = 2
+ CompressionLZ4 CompressionCodec = 3
+)
+
+type Message struct {
+ Codec CompressionCodec // codec used to compress the message contents
+ Key []byte // the message key, may be nil
+ Value []byte // the message contents
+ Set *MessageSet // the message set a message might wrap
+ Version int8 // v1 requires Kafka 0.10
+ Timestamp time.Time // the timestamp of the message (version 1+ only)
+
+ compressedCache []byte
+ compressedSize int // used for computing the compression ratio metrics
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+ pe.push(&crc32Field{})
+
+ pe.putInt8(m.Version)
+
+ attributes := int8(m.Codec) & compressionCodecMask
+ pe.putInt8(attributes)
+
+ if m.Version >= 1 {
+ timestamp := int64(-1)
+
+ if !m.Timestamp.Before(time.Unix(0, 0)) {
+ timestamp = m.Timestamp.UnixNano() / int64(time.Millisecond)
+ } else if !m.Timestamp.IsZero() {
+ return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", m.Timestamp)}
+ }
+
+ pe.putInt64(timestamp)
+ }
+
+ err := pe.putBytes(m.Key)
+ if err != nil {
+ return err
+ }
+
+ var payload []byte
+
+ if m.compressedCache != nil {
+ payload = m.compressedCache
+ m.compressedCache = nil
+ } else if m.Value != nil {
+ switch m.Codec {
+ case CompressionNone:
+ payload = m.Value
+ case CompressionGZIP:
+ var buf bytes.Buffer
+ writer := gzip.NewWriter(&buf)
+ if _, err = writer.Write(m.Value); err != nil {
+ return err
+ }
+ if err = writer.Close(); err != nil {
+ return err
+ }
+ m.compressedCache = buf.Bytes()
+ payload = m.compressedCache
+ case CompressionSnappy:
+ tmp := snappy.Encode(m.Value)
+ m.compressedCache = tmp
+ payload = m.compressedCache
+ case CompressionLZ4:
+ var buf bytes.Buffer
+ writer := lz4.NewWriter(&buf)
+ if _, err = writer.Write(m.Value); err != nil {
+ return err
+ }
+ if err = writer.Close(); err != nil {
+ return err
+ }
+ m.compressedCache = buf.Bytes()
+ payload = m.compressedCache
+
+ default:
+ return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
+ }
+ // Keep in mind the compressed payload size for metric gathering
+ m.compressedSize = len(payload)
+ }
+
+ if err = pe.putBytes(payload); err != nil {
+ return err
+ }
+
+ return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+ err = pd.push(&crc32Field{})
+ if err != nil {
+ return err
+ }
+
+ m.Version, err = pd.getInt8()
+ if err != nil {
+ return err
+ }
+
+ attribute, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ m.Codec = CompressionCodec(attribute & compressionCodecMask)
+
+ if m.Version >= 1 {
+ millis, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ // negative timestamps are invalid, in these cases we should return
+ // a zero time
+ timestamp := time.Time{}
+ if millis >= 0 {
+ timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+ }
+
+ m.Timestamp = timestamp
+ }
+
+ m.Key, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ m.Value, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ // Required for deep equal assertion during tests but might be useful
+ // for future metrics about the compression ratio in fetch requests
+ m.compressedSize = len(m.Value)
+
+ switch m.Codec {
+ case CompressionNone:
+ // nothing to do
+ case CompressionGZIP:
+ if m.Value == nil {
+ break
+ }
+ reader, err := gzip.NewReader(bytes.NewReader(m.Value))
+ if err != nil {
+ return err
+ }
+ if m.Value, err = ioutil.ReadAll(reader); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+ case CompressionSnappy:
+ if m.Value == nil {
+ break
+ }
+ if m.Value, err = snappy.Decode(m.Value); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+ case CompressionLZ4:
+ if m.Value == nil {
+ break
+ }
+ reader := lz4.NewReader(bytes.NewReader(m.Value))
+ if m.Value, err = ioutil.ReadAll(reader); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+
+ default:
+ return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
+ }
+
+ return pd.pop()
+}
+
+// decodes a message set from a previously encoded bulk-message
+func (m *Message) decodeSet() (err error) {
+ pd := realDecoder{raw: m.Value}
+ m.Set = &MessageSet{}
+ return m.Set.decode(&pd)
+}
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
new file mode 100644
index 00000000..f028784e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message_set.go
@@ -0,0 +1,89 @@
+package sarama
+
+type MessageBlock struct {
+ Offset int64
+ Msg *Message
+}
+
+// Messages is a convenience helper which returns either all the messages
+// wrapped in this block's nested message set, or the block itself when it
+// does not wrap a set.
+func (msb *MessageBlock) Messages() []*MessageBlock {
+ if msb.Msg.Set != nil {
+ return msb.Msg.Set.Messages
+ }
+ return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+ pe.putInt64(msb.Offset)
+ pe.push(&lengthField{})
+ err := msb.Msg.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+ if msb.Offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if err = pd.push(&lengthField{}); err != nil {
+ return err
+ }
+
+ msb.Msg = new(Message)
+ if err = msb.Msg.decode(pd); err != nil {
+ return err
+ }
+
+ if err = pd.pop(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type MessageSet struct {
+ PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+ Messages []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+ for i := range ms.Messages {
+ err := ms.Messages[i].encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+ ms.Messages = nil
+
+ for pd.remaining() > 0 {
+ msb := new(MessageBlock)
+ err = msb.decode(pd)
+ switch err {
+ case nil:
+ ms.Messages = append(ms.Messages, msb)
+ case ErrInsufficientData:
+ // As an optimization the server is allowed to return a partial message at the
+ // end of the message set. Clients should handle this case. So we just ignore such things.
+ ms.PartialTrailingMessage = true
+ return nil
+ default:
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+ block := new(MessageBlock)
+ block.Msg = msg
+ ms.Messages = append(ms.Messages, block)
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
new file mode 100644
index 00000000..9a26b55f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_request.go
@@ -0,0 +1,52 @@
+package sarama
+
+type MetadataRequest struct {
+ Topics []string
+}
+
+func (r *MetadataRequest) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Topics))
+ if err != nil {
+ return err
+ }
+
+ for i := range r.Topics {
+ err = pe.putString(r.Topics[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+
+ r.Topics = make([]string, topicCount)
+ for i := range r.Topics {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Topics[i] = topic
+ }
+ return nil
+}
+
+func (r *MetadataRequest) key() int16 {
+ return 3
+}
+
+func (r *MetadataRequest) version() int16 {
+ return 0
+}
+
+func (r *MetadataRequest) requiredVersion() KafkaVersion {
+ return minVersion
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
new file mode 100644
index 00000000..f9d6a427
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_response.go
@@ -0,0 +1,239 @@
+package sarama
+
+type PartitionMetadata struct {
+ Err KError
+ ID int32
+ Leader int32
+ Replicas []int32
+ Isr []int32
+}
+
+func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ pm.Err = KError(tmp)
+
+ pm.ID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ pm.Leader, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ pm.Replicas, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ pm.Isr, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(pm.Err))
+ pe.putInt32(pm.ID)
+ pe.putInt32(pm.Leader)
+
+ err = pe.putInt32Array(pm.Replicas)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putInt32Array(pm.Isr)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type TopicMetadata struct {
+ Err KError
+ Name string
+ Partitions []*PartitionMetadata
+}
+
+func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ tm.Err = KError(tmp)
+
+ tm.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ tm.Partitions = make([]*PartitionMetadata, n)
+ for i := 0; i < n; i++ {
+ tm.Partitions[i] = new(PartitionMetadata)
+ err = tm.Partitions[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(tm.Err))
+
+ err = pe.putString(tm.Name)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(tm.Partitions))
+ if err != nil {
+ return err
+ }
+
+ for _, pm := range tm.Partitions {
+ err = pm.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type MetadataResponse struct {
+ Brokers []*Broker
+ Topics []*TopicMetadata
+}
+
+func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Brokers = make([]*Broker, n)
+ for i := 0; i < n; i++ {
+ r.Brokers[i] = new(Broker)
+ err = r.Brokers[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ n, err = pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Topics = make([]*TopicMetadata, n)
+ for i := 0; i < n; i++ {
+ r.Topics[i] = new(TopicMetadata)
+ err = r.Topics[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *MetadataResponse) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Brokers))
+ if err != nil {
+ return err
+ }
+ for _, broker := range r.Brokers {
+ err = broker.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = pe.putArrayLength(len(r.Topics))
+ if err != nil {
+ return err
+ }
+ for _, tm := range r.Topics {
+ err = tm.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *MetadataResponse) key() int16 {
+ return 3
+}
+
+func (r *MetadataResponse) version() int16 {
+ return 0
+}
+
+func (r *MetadataResponse) requiredVersion() KafkaVersion {
+ return minVersion
+}
+
+// testing API
+
+func (r *MetadataResponse) AddBroker(addr string, id int32) {
+ r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+ var tmatch *TopicMetadata
+
+ for _, tm := range r.Topics {
+ if tm.Name == topic {
+ tmatch = tm
+ goto foundTopic
+ }
+ }
+
+ tmatch = new(TopicMetadata)
+ tmatch.Name = topic
+ r.Topics = append(r.Topics, tmatch)
+
+foundTopic:
+
+ tmatch.Err = err
+ return tmatch
+}
+
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
+ tmatch := r.AddTopic(topic, ErrNoError)
+ var pmatch *PartitionMetadata
+
+ for _, pm := range tmatch.Partitions {
+ if pm.ID == partition {
+ pmatch = pm
+ goto foundPartition
+ }
+ }
+
+ pmatch = new(PartitionMetadata)
+ pmatch.ID = partition
+ tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+
+ pmatch.Leader = brokerID
+ pmatch.Replicas = replicas
+ pmatch.Isr = isr
+ pmatch.Err = err
+
+}
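
The "testing API" at the end of `metadata_response.go` lets mock brokers fabricate cluster metadata. A short sketch, with the broker address and topic layout invented for the example:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	resp := new(sarama.MetadataResponse)

	// One broker, and one topic with two partitions led by that broker.
	resp.AddBroker("localhost:9092", 0)
	resp.AddTopicPartition("example-topic", 0, 0, []int32{0}, []int32{0}, sarama.ErrNoError)
	resp.AddTopicPartition("example-topic", 1, 0, []int32{0}, []int32{0}, sarama.ErrNoError)

	for _, tm := range resp.Topics {
		fmt.Printf("topic %s has %d partition(s)\n", tm.Name, len(tm.Partitions))
	}
}
```
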
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
new file mode 100644
index 00000000..4869708e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metrics.go
@@ -0,0 +1,51 @@
+package sarama
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
+// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
+// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+ metricsReservoirSize = 1028
+ metricsAlphaFactor = 0.015
+)
+
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+ return r.GetOrRegister(name, func() metrics.Histogram {
+ return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+ }).(metrics.Histogram)
+}
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+ // Use broker id like the Java client as it does not contain '.' or ':' characters that
+ // can be interpreted as special characters by monitoring tools (e.g. Graphite)
+ return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
+}
+
+func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
+ return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
+}
+
+func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
+ return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+ // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
+ // cf. KAFKA-1902 and KAFKA-2337
+ return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+ return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+ return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
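The helpers above differ only in how they derive metric names. A short sketch of the resulting names, assuming it runs inside the sarama package (the helpers are unexported) with a hypothetical base name `request-rate`:

```go
// metricNameSketch shows the naming convention produced by the helpers above.
func metricNameSketch() {
	r := metrics.NewRegistry()
	// Registered as "request-rate-for-topic-my_topic": dots in the topic are
	// replaced with underscores so Graphite-style reporters keep the name whole.
	getOrRegisterTopicMeter("request-rate", "my.topic", r).Mark(1)
	// Broker-scoped metrics use the numeric broker ID rather than host:port.
	getOrRegisterBrokerMeter("request-rate", &Broker{id: 42}, r).Mark(1)
}
```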
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 00000000..0734d34f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,324 @@
+package sarama
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+const (
+ expectationTimeout = 500 * time.Millisecond
+)
+
+type requestHandlerFunc func(req *request) (res encoder)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request successfully
+// and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshaling, and response marshaling, and leaves it to
+// the test writer to program MockBroker behaviour that is correct according
+// to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from those connections and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockResponse builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockResponses of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes; the server does that automatically as a convenience.
+type MockBroker struct {
+ brokerID int32
+ port int32
+ closing chan none
+ stopper chan none
+ expectations chan encoder
+ listener net.Listener
+ t TestReporter
+ latency time.Duration
+ handler requestHandlerFunc
+ notifier RequestNotifierFunc
+ history []RequestResponse
+ lock sync.Mutex
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+ Request protocolBody
+ Response encoder
+}
+
+// SetLatency makes the broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+ b.latency = latency
+}
+
+// SetHandlerByMap defines mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+ b.setHandler(func(req *request) (res encoder) {
+ reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+ mockResponse := handlerMap[reqTypeName]
+ if mockResponse == nil {
+ return nil
+ }
+ return mockResponse.For(req.body)
+ })
+}
+
+// SetNotifier sets a function that will get invoked whenever a request has been
+// processed successfully, providing the number of bytes read and written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+ b.lock.Lock()
+ b.notifier = notifier
+ b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+ return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+ b.lock.Lock()
+ history := make([]RequestResponse, len(b.history))
+ copy(history, b.history)
+ b.lock.Unlock()
+ return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+ return b.port
+}
+
+// Addr returns the broker connection string in the form "<addr>:<port>".
+func (b *MockBroker) Addr() string {
+ return b.listener.Addr().String()
+}
+
+// Close terminates the broker, blocking until it stops internal goroutines and
+// releases all resources.
+func (b *MockBroker) Close() {
+ close(b.expectations)
+ if len(b.expectations) > 0 {
+ buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+ for e := range b.expectations {
+ _, _ = buf.WriteString(spew.Sdump(e))
+ }
+ b.t.Error(buf.String())
+ }
+ close(b.closing)
+ <-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+ b.lock.Lock()
+ b.handler = handler
+ b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+ defer close(b.stopper)
+ var err error
+ var conn net.Conn
+
+ go func() {
+ <-b.closing
+ err := b.listener.Close()
+ if err != nil {
+ b.t.Error(err)
+ }
+ }()
+
+ wg := &sync.WaitGroup{}
+ i := 0
+ for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+ wg.Add(1)
+ go b.handleRequests(conn, i, wg)
+ i++
+ }
+ wg.Wait()
+ Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+}
+
+func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
+ defer wg.Done()
+ defer func() {
+ _ = conn.Close()
+ }()
+ Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+ var err error
+
+ abort := make(chan none)
+ defer close(abort)
+ go func() {
+ select {
+ case <-b.closing:
+ _ = conn.Close()
+ case <-abort:
+ }
+ }()
+
+ resHeader := make([]byte, 8)
+ for {
+ req, bytesRead, err := decodeRequest(conn)
+ if err != nil {
+ Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+ b.serverError(err)
+ break
+ }
+
+ if b.latency > 0 {
+ time.Sleep(b.latency)
+ }
+
+ b.lock.Lock()
+ res := b.handler(req)
+ b.history = append(b.history, RequestResponse{req.body, res})
+ b.lock.Unlock()
+
+ if res == nil {
+ Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+ continue
+ }
+ Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+
+ encodedRes, err := encode(res, nil)
+ if err != nil {
+ b.serverError(err)
+ break
+ }
+ if len(encodedRes) == 0 {
+ b.lock.Lock()
+ if b.notifier != nil {
+ b.notifier(bytesRead, 0)
+ }
+ b.lock.Unlock()
+ continue
+ }
+
+ binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
+ binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
+ if _, err = conn.Write(resHeader); err != nil {
+ b.serverError(err)
+ break
+ }
+ if _, err = conn.Write(encodedRes); err != nil {
+ b.serverError(err)
+ break
+ }
+
+ b.lock.Lock()
+ if b.notifier != nil {
+ b.notifier(bytesRead, len(resHeader)+len(encodedRes))
+ }
+ b.lock.Unlock()
+ }
+ Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
+ select {
+ case res, ok := <-b.expectations:
+ if !ok {
+ return nil
+ }
+ return res
+ case <-time.After(expectationTimeout):
+ return nil
+ }
+}
+
+func (b *MockBroker) serverError(err error) {
+ isConnectionClosedError := false
+ if _, ok := err.(*net.OpError); ok {
+ isConnectionClosedError = true
+ } else if err == io.EOF {
+ isConnectionClosedError = true
+ } else if err.Error() == "use of closed network connection" {
+ isConnectionClosedError = true
+ }
+
+ if isConnectionClosedError {
+ return
+ }
+
+ b.t.Errorf(err.Error())
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework and a broker ID. If an error occurs it is
+// simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+ return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+ var err error
+
+ broker := &MockBroker{
+ closing: make(chan none),
+ stopper: make(chan none),
+ t: t,
+ brokerID: brokerID,
+ expectations: make(chan encoder, 512),
+ }
+ broker.handler = broker.defaultRequestHandler
+
+ broker.listener, err = net.Listen("tcp", addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+ _, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ tmp, err := strconv.ParseInt(portStr, 10, 32)
+ if err != nil {
+ t.Fatal(err)
+ }
+ broker.port = int32(tmp)
+
+ go broker.serverLoop()
+
+ return broker
+}
+
+func (b *MockBroker) Returns(e encoder) {
+ b.expectations <- e
+}
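A typical test either queues canned responses with `Returns` or installs a handler map. A minimal sketch of the latter, assuming a `TestReporter` from the test framework and the `MockResponse` builders defined in `mockresponses.go` (the next file in this diff); topic and broker IDs are hypothetical:

```go
// newMetadataMockBroker starts a MockBroker that answers metadata requests
// for a single topic/partition led by itself; all other requests time out.
func newMetadataMockBroker(t TestReporter) *MockBroker {
	broker := NewMockBroker(t, 1)
	broker.SetHandlerByMap(map[string]MockResponse{
		// Keys are the request type names looked up via reflection above.
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my_topic", 0, broker.BrokerID()),
	})
	return broker // the caller must call broker.Close() when the test is done
}
```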
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
new file mode 100644
index 00000000..a2031420
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -0,0 +1,455 @@
+package sarama
+
+import (
+ "fmt"
+)
+
+// TestReporter has methods matching go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+ Error(...interface{})
+ Errorf(string, ...interface{})
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+}
+
+// MockResponse is a response builder interface; it defines one method that
+// allows generating a response based on a request body. MockResponses are used
+// to program behavior of MockBroker in tests.
+type MockResponse interface {
+ For(reqBody versionedDecoder) (res encoder)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+ res encoder
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+ return mw.res
+}
+
+func NewMockWrapper(res encoder) *MockWrapper {
+ return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Every time a `MockBroker` calls its `For` method
+// the next response from the sequence is returned. When the end of the
+// sequence is reached the last element from the sequence is returned.
+type MockSequence struct {
+ responses []MockResponse
+}
+
+func NewMockSequence(responses ...interface{}) *MockSequence {
+ ms := &MockSequence{}
+ ms.responses = make([]MockResponse, len(responses))
+ for i, res := range responses {
+ switch res := res.(type) {
+ case MockResponse:
+ ms.responses[i] = res
+ case encoder:
+ ms.responses[i] = NewMockWrapper(res)
+ default:
+ panic(fmt.Sprintf("Unexpected response type: %T", res))
+ }
+ }
+ return ms
+}
+
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
+ res = mc.responses[0].For(reqBody)
+ if len(mc.responses) > 1 {
+ mc.responses = mc.responses[1:]
+ }
+ return res
+}
+
+// MockMetadataResponse is a `MetadataResponse` builder.
+type MockMetadataResponse struct {
+ leaders map[string]map[int32]int32
+ brokers map[string]int32
+ t TestReporter
+}
+
+func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
+ return &MockMetadataResponse{
+ leaders: make(map[string]map[int32]int32),
+ brokers: make(map[string]int32),
+ t: t,
+ }
+}
+
+func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
+ partitions := mmr.leaders[topic]
+ if partitions == nil {
+ partitions = make(map[int32]int32)
+ mmr.leaders[topic] = partitions
+ }
+ partitions[partition] = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
+ mmr.brokers[addr] = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
+ metadataRequest := reqBody.(*MetadataRequest)
+ metadataResponse := &MetadataResponse{}
+ for addr, brokerID := range mmr.brokers {
+ metadataResponse.AddBroker(addr, brokerID)
+ }
+ if len(metadataRequest.Topics) == 0 {
+ for topic, partitions := range mmr.leaders {
+ for partition, brokerID := range partitions {
+ metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
+ }
+ }
+ return metadataResponse
+ }
+ for _, topic := range metadataRequest.Topics {
+ for partition, brokerID := range mmr.leaders[topic] {
+ metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
+ }
+ }
+ return metadataResponse
+}
+
+// MockOffsetResponse is an `OffsetResponse` builder.
+type MockOffsetResponse struct {
+ offsets map[string]map[int32]map[int64]int64
+ t TestReporter
+}
+
+func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
+ return &MockOffsetResponse{
+ offsets: make(map[string]map[int32]map[int64]int64),
+ t: t,
+ }
+}
+
+func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
+ partitions := mor.offsets[topic]
+ if partitions == nil {
+ partitions = make(map[int32]map[int64]int64)
+ mor.offsets[topic] = partitions
+ }
+ times := partitions[partition]
+ if times == nil {
+ times = make(map[int64]int64)
+ partitions[partition] = times
+ }
+ times[time] = offset
+ return mor
+}
+
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
+ offsetRequest := reqBody.(*OffsetRequest)
+ offsetResponse := &OffsetResponse{}
+ for topic, partitions := range offsetRequest.blocks {
+ for partition, block := range partitions {
+ offset := mor.getOffset(topic, partition, block.time)
+ offsetResponse.AddTopicPartition(topic, partition, offset)
+ }
+ }
+ return offsetResponse
+}
+
+func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
+ partitions := mor.offsets[topic]
+ if partitions == nil {
+ mor.t.Errorf("missing topic: %s", topic)
+ }
+ times := partitions[partition]
+ if times == nil {
+ mor.t.Errorf("missing partition: %d", partition)
+ }
+ offset, ok := times[time]
+ if !ok {
+ mor.t.Errorf("missing time: %d", time)
+ }
+ return offset
+}
+
+// MockFetchResponse is a `FetchResponse` builder.
+type MockFetchResponse struct {
+ messages map[string]map[int32]map[int64]Encoder
+ highWaterMarks map[string]map[int32]int64
+ t TestReporter
+ batchSize int
+}
+
+func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
+ return &MockFetchResponse{
+ messages: make(map[string]map[int32]map[int64]Encoder),
+ highWaterMarks: make(map[string]map[int32]int64),
+ t: t,
+ batchSize: batchSize,
+ }
+}
+
+func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ partitions = make(map[int32]map[int64]Encoder)
+ mfr.messages[topic] = partitions
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ messages = make(map[int64]Encoder)
+ partitions[partition] = messages
+ }
+ messages[offset] = msg
+ return mfr
+}
+
+func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
+ partitions := mfr.highWaterMarks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]int64)
+ mfr.highWaterMarks[topic] = partitions
+ }
+ partitions[partition] = offset
+ return mfr
+}
+
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
+ fetchRequest := reqBody.(*FetchRequest)
+ res := &FetchResponse{}
+ for topic, partitions := range fetchRequest.blocks {
+ for partition, block := range partitions {
+ initialOffset := block.fetchOffset
+ offset := initialOffset
+ maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
+ for i := 0; i < mfr.batchSize && offset < maxOffset; {
+ msg := mfr.getMessage(topic, partition, offset)
+ if msg != nil {
+ res.AddMessage(topic, partition, nil, msg, offset)
+ i++
+ }
+ offset++
+ }
+ fb := res.GetBlock(topic, partition)
+ if fb == nil {
+ res.AddError(topic, partition, ErrNoError)
+ fb = res.GetBlock(topic, partition)
+ }
+ fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
+ }
+ }
+ return res
+}
+
+func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ return nil
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ return nil
+ }
+ return messages[offset]
+}
+
+func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ return 0
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ return 0
+ }
+ return len(messages)
+}
+
+func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
+ partitions := mfr.highWaterMarks[topic]
+ if partitions == nil {
+ return 0
+ }
+ return partitions[partition]
+}
+
+// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
+type MockConsumerMetadataResponse struct {
+ coordinators map[string]interface{}
+ t TestReporter
+}
+
+func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
+ return &MockConsumerMetadataResponse{
+ coordinators: make(map[string]interface{}),
+ t: t,
+ }
+}
+
+func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
+ mr.coordinators[group] = broker
+ return mr
+}
+
+func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
+ mr.coordinators[group] = kerror
+ return mr
+}
+
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*ConsumerMetadataRequest)
+ group := req.ConsumerGroup
+ res := &ConsumerMetadataResponse{}
+ v := mr.coordinators[group]
+ switch v := v.(type) {
+ case *MockBroker:
+ res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+ case KError:
+ res.Err = v
+ }
+ return res
+}
+
+// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
+type MockOffsetCommitResponse struct {
+ errors map[string]map[string]map[int32]KError
+ t TestReporter
+}
+
+func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
+ return &MockOffsetCommitResponse{t: t}
+}
+
+func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
+ if mr.errors == nil {
+ mr.errors = make(map[string]map[string]map[int32]KError)
+ }
+ topics := mr.errors[group]
+ if topics == nil {
+ topics = make(map[string]map[int32]KError)
+ mr.errors[group] = topics
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ topics[topic] = partitions
+ }
+ partitions[partition] = kerror
+ return mr
+}
+
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*OffsetCommitRequest)
+ group := req.ConsumerGroup
+ res := &OffsetCommitResponse{}
+ for topic, partitions := range req.blocks {
+ for partition := range partitions {
+ res.AddError(topic, partition, mr.getError(group, topic, partition))
+ }
+ }
+ return res
+}
+
+func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
+ topics := mr.errors[group]
+ if topics == nil {
+ return ErrNoError
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ return ErrNoError
+ }
+ kerror, ok := partitions[partition]
+ if !ok {
+ return ErrNoError
+ }
+ return kerror
+}
+
+// MockProduceResponse is a `ProduceResponse` builder.
+type MockProduceResponse struct {
+ errors map[string]map[int32]KError
+ t TestReporter
+}
+
+func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
+ return &MockProduceResponse{t: t}
+}
+
+func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
+ if mr.errors == nil {
+ mr.errors = make(map[string]map[int32]KError)
+ }
+ partitions := mr.errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ mr.errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+ return mr
+}
+
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*ProduceRequest)
+ res := &ProduceResponse{}
+ for topic, partitions := range req.msgSets {
+ for partition := range partitions {
+ res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
+ }
+ }
+ return res
+}
+
+func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
+ partitions := mr.errors[topic]
+ if partitions == nil {
+ return ErrNoError
+ }
+ kerror, ok := partitions[partition]
+ if !ok {
+ return ErrNoError
+ }
+ return kerror
+}
+
+// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
+type MockOffsetFetchResponse struct {
+ offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+ t TestReporter
+}
+
+func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
+ return &MockOffsetFetchResponse{t: t}
+}
+
+func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
+ if mr.offsets == nil {
+ mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ topics := mr.offsets[group]
+ if topics == nil {
+ topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ mr.offsets[group] = topics
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ topics[topic] = partitions
+ }
+ partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
+ return mr
+}
+
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*OffsetFetchRequest)
+ group := req.ConsumerGroup
+ res := &OffsetFetchResponse{}
+ for topic, partitions := range mr.offsets[group] {
+ for partition, block := range partitions {
+ res.AddBlock(topic, partition, block)
+ }
+ }
+ return res
+}
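The builders above are usually chained so a consumer under test can bootstrap, look up offsets and fetch messages in one go. A sketch under the same assumptions as before; `OffsetOldest`, `OffsetNewest` and `StringEncoder` are assumed from elsewhere in the library:

```go
// programConsumerBroker programs a MockBroker so a consumer of "my_topic"/0
// can start at the oldest offset and read two pre-seeded messages.
func programConsumerBroker(t TestReporter, broker *MockBroker) {
	broker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my_topic", 0, broker.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 2),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 0, StringEncoder("hello")).
			SetMessage("my_topic", 0, 1, StringEncoder("world")),
	})
}
```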
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 00000000..b21ea634
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,190 @@
+package sarama
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
+const GroupGenerationUndefined = -1
+
+type offsetCommitRequestBlock struct {
+ offset int64
+ timestamp int64
+ metadata string
+}
+
+func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+ pe.putInt64(b.offset)
+ if version == 1 {
+ pe.putInt64(b.timestamp)
+ } else if b.timestamp != 0 {
+ Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
+ }
+
+ return pe.putString(b.metadata)
+}
+
+func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ if b.offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if version == 1 {
+ if b.timestamp, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+ b.metadata, err = pd.getString()
+ return err
+}
+
+type OffsetCommitRequest struct {
+ ConsumerGroup string
+ ConsumerGroupGeneration int32 // v1 or later
+ ConsumerID string // v1 or later
+ RetentionTime int64 // v2 or later
+
+ // Version can be:
+ // - 0 (kafka 0.8.1 and later)
+ // - 1 (kafka 0.8.2 and later)
+ // - 2 (kafka 0.9.0 and later)
+ Version int16
+ blocks map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+ if r.Version < 0 || r.Version > 2 {
+ return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+ }
+
+ if err := pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ pe.putInt32(r.ConsumerGroupGeneration)
+ if err := pe.putString(r.ConsumerID); err != nil {
+ return err
+ }
+ } else {
+ if r.ConsumerGroupGeneration != 0 {
+ Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ if r.ConsumerID != "" {
+ Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ }
+
+ if r.Version >= 2 {
+ pe.putInt64(r.RetentionTime)
+ } else if r.RetentionTime != 0 {
+ Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
+ }
+
+ if err := pe.putArrayLength(len(r.blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.ConsumerID, err = pd.getString(); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 2 {
+ if r.RetentionTime, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetCommitRequestBlock{}
+ if err := block.decode(pd, r.Version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+ return 8
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_8_2_0
+ case 2:
+ return V0_9_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ }
+
+ r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
+}
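A sketch of building a version-aware commit request by hand, mirroring what the broker offset manager does later in this diff; the group, topic and offset are hypothetical:

```go
// newCommitRequest builds a v1 commit for a group that does not use
// Kafka-managed generations.
func newCommitRequest(group string, offset int64) *OffsetCommitRequest {
	r := &OffsetCommitRequest{
		Version:                 1, // requires Kafka 0.8.2 or later
		ConsumerGroup:           group,
		ConsumerGroupGeneration: GroupGenerationUndefined,
	}
	// ReceiveTime asks the broker to stamp the commit with its own receive time.
	r.AddBlock("my_topic", 0, offset, ReceiveTime, "")
	return r
}
```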
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
new file mode 100644
index 00000000..7f277e77
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go
@@ -0,0 +1,85 @@
+package sarama
+
+type OffsetCommitResponse struct {
+ Errors map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+ if r.Errors == nil {
+ r.Errors = make(map[string]map[int32]KError)
+ }
+ partitions := r.Errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ r.Errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+}
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Errors)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Errors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, kerror := range partitions {
+ pe.putInt32(partition)
+ pe.putInt16(int16(kerror))
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Errors = make(map[string]map[int32]KError, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numErrors, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Errors[name] = make(map[int32]KError, numErrors)
+
+ for j := 0; j < numErrors; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Errors[name][id] = KError(tmp)
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetCommitResponse) key() int16 {
+ return 8
+}
+
+func (r *OffsetCommitResponse) version() int16 {
+ return 0
+}
+
+func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
+ return minVersion
+}
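A small sketch of inspecting the per-partition result of a commit, mirroring the checks the broker offset manager performs later in this diff:

```go
// commitSucceeded reports whether the response carries ErrNoError for the
// given topic/partition; a missing entry is treated as an incomplete response.
func commitSucceeded(res *OffsetCommitResponse, topic string, partition int32) bool {
	partitions, ok := res.Errors[topic]
	if !ok {
		return false
	}
	kerr, ok := partitions[partition]
	return ok && kerr == ErrNoError
}
```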
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
new file mode 100644
index 00000000..b19fe79b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
@@ -0,0 +1,81 @@
+package sarama
+
+type OffsetFetchRequest struct {
+ ConsumerGroup string
+ Version int16
+ partitions map[string][]int32
+}
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+ if r.Version < 0 || r.Version > 1 {
+ return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+ }
+
+ if err = pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+ if err = pe.putArrayLength(len(r.partitions)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.partitions {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if partitionCount == 0 {
+ return nil
+ }
+ r.partitions = make(map[string][]int32)
+ for i := 0; i < partitionCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitions, err := pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+ r.partitions[topic] = partitions
+ }
+ return nil
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+ return 9
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_8_2_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+ if r.partitions == nil {
+ r.partitions = make(map[string][]int32)
+ }
+
+ r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
new file mode 100644
index 00000000..323220ea
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
@@ -0,0 +1,143 @@
+package sarama
+
+type OffsetFetchResponseBlock struct {
+ Offset int64
+ Metadata string
+ Err KError
+}
+
+func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ b.Metadata, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ return nil
+}
+
+func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
+ pe.putInt64(b.Offset)
+
+ err = pe.putString(b.Metadata)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt16(int16(b.Err))
+
+ return nil
+}
+
+type OffsetFetchResponse struct {
+ Blocks map[string]map[int32]*OffsetFetchResponseBlock
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numBlocks == 0 {
+ r.Blocks[name] = nil
+ continue
+ }
+ r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetFetchResponseBlock)
+ err = block.decode(pd)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetFetchResponse) key() int16 {
+ return 9
+}
+
+func (r *OffsetFetchResponse) version() int16 {
+ return 0
+}
+
+func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
+ return minVersion
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ partitions := r.Blocks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ partitions[partition] = block
+}
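Fetching a committed offset back is symmetric to committing it. A sketch that mirrors `fetchInitialOffset` in `offset_manager.go` further down this diff; `Broker.FetchOffset` is the call used there, and the topic/partition are hypothetical:

```go
// fetchCommitted asks the group coordinator for the committed offset of
// "my_topic"/0 on behalf of the given group.
func fetchCommitted(coordinator *Broker, group string) (int64, error) {
	req := new(OffsetFetchRequest)
	req.Version = 1 // v1 reads offsets stored in Kafka rather than ZooKeeper
	req.ConsumerGroup = group
	req.AddPartition("my_topic", 0)

	res, err := coordinator.FetchOffset(req)
	if err != nil {
		return 0, err
	}
	block := res.GetBlock("my_topic", 0)
	if block == nil {
		return 0, ErrIncompleteResponse
	}
	if block.Err != ErrNoError {
		return 0, block.Err
	}
	return block.Offset, nil
}
```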
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
new file mode 100644
index 00000000..5e15cdaf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -0,0 +1,542 @@
+package sarama
+
+import (
+ "sync"
+ "time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+ // ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+ // It will return an error if this OffsetManager is already managing the given
+ // topic/partition.
+ ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+ // Close stops the OffsetManager from managing offsets. It is required to call
+ // this function before an OffsetManager object passes out of scope, as it
+ // will otherwise leak memory. You must call this after all the
+ // PartitionOffsetManagers are closed.
+ Close() error
+}
+
+type offsetManager struct {
+ client Client
+ conf *Config
+ group string
+
+ lock sync.Mutex
+ poms map[string]map[int32]*partitionOffsetManager
+ boms map[*Broker]*brokerOffsetManager
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the partition manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ om := &offsetManager{
+ client: client,
+ conf: client.Config(),
+ group: group,
+ poms: make(map[string]map[int32]*partitionOffsetManager),
+ boms: make(map[*Broker]*brokerOffsetManager),
+ }
+
+ return om, nil
+}
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+ pom, err := om.newPartitionOffsetManager(topic, partition)
+ if err != nil {
+ return nil, err
+ }
+
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ topicManagers := om.poms[topic]
+ if topicManagers == nil {
+ topicManagers = make(map[int32]*partitionOffsetManager)
+ om.poms[topic] = topicManagers
+ }
+
+ if topicManagers[partition] != nil {
+ return nil, ConfigurationError("That topic/partition is already being managed")
+ }
+
+ topicManagers[partition] = pom
+ return pom, nil
+}
+
+func (om *offsetManager) Close() error {
+ return nil
+}
+
+func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ bom := om.boms[broker]
+ if bom == nil {
+ bom = om.newBrokerOffsetManager(broker)
+ om.boms[broker] = bom
+ }
+
+ bom.refs++
+
+ return bom
+}
+
+func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ bom.refs--
+
+ if bom.refs == 0 {
+ close(bom.updateSubscriptions)
+ if om.boms[bom.broker] == bom {
+ delete(om.boms, bom.broker)
+ }
+ }
+}
+
+func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ delete(om.boms, bom.broker)
+}
+
+func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ delete(om.poms[pom.topic], pom.partition)
+ if len(om.poms[pom.topic]) == 0 {
+ delete(om.poms, pom.topic)
+ }
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
+// out of scope.
+type PartitionOffsetManager interface {
+ // NextOffset returns the next offset that should be consumed for the managed
+ // partition, accompanied by metadata which can be used to reconstruct the state
+ // of the partition consumer when it resumes. NextOffset() will return
+ // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+ // was committed for this partition yet.
+ NextOffset() (int64, string)
+
+ // MarkOffset marks the provided offset, alongside a metadata string
+ // that represents the state of the partition consumer at that point in time. The
+ // metadata string can be used by another consumer to restore that state, so it
+ // can resume consumption.
+ //
+ // To follow upstream conventions, you are expected to mark the offset of the
+ // next message to read, not the last message read. Thus, when calling `MarkOffset`
+ // you should typically add one to the offset of the last consumed message.
+ //
+ // Note: calling MarkOffset does not necessarily commit the offset to the backend
+ // store immediately for efficiency reasons, and it may never be committed if
+ // your application crashes. This means that you may end up processing the same
+ // message twice, and your processing should ideally be idempotent.
+ MarkOffset(offset int64, metadata string)
+
+ // Errors returns a read channel of errors that occur during offset management, if
+ // enabled. By default, errors are logged and not returned over this channel. If
+ // you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+ // return immediately, after which you should wait until the 'errors' channel has
+// been drained and closed. It is required to call this function (or Close) before
+ // a consumer object passes out of scope, as it will otherwise leak memory. You
+ // must call this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionOffsetManager from managing offsets. It is required to
+ // call this function (or AsyncClose) before a PartitionOffsetManager object
+ // passes out of scope, as it will otherwise leak memory. You must call this
+ // before calling Close on the underlying client.
+ Close() error
+}
+
+type partitionOffsetManager struct {
+ parent *offsetManager
+ topic string
+ partition int32
+
+ lock sync.Mutex
+ offset int64
+ metadata string
+ dirty bool
+ clean sync.Cond
+ broker *brokerOffsetManager
+
+ errors chan *ConsumerError
+ rebalance chan none
+ dying chan none
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+ pom := &partitionOffsetManager{
+ parent: om,
+ topic: topic,
+ partition: partition,
+ errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
+ rebalance: make(chan none, 1),
+ dying: make(chan none),
+ }
+ pom.clean.L = &pom.lock
+
+ if err := pom.selectBroker(); err != nil {
+ return nil, err
+ }
+
+ if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
+ return nil, err
+ }
+
+ pom.broker.updateSubscriptions <- pom
+
+ go withRecover(pom.mainLoop)
+
+ return pom, nil
+}
+
+func (pom *partitionOffsetManager) mainLoop() {
+ for {
+ select {
+ case <-pom.rebalance:
+ if err := pom.selectBroker(); err != nil {
+ pom.handleError(err)
+ pom.rebalance <- none{}
+ } else {
+ pom.broker.updateSubscriptions <- pom
+ }
+ case <-pom.dying:
+ if pom.broker != nil {
+ select {
+ case <-pom.rebalance:
+ case pom.broker.updateSubscriptions <- pom:
+ }
+ pom.parent.unrefBrokerOffsetManager(pom.broker)
+ }
+ pom.parent.abandonPartitionOffsetManager(pom)
+ close(pom.errors)
+ return
+ }
+ }
+}
+
+func (pom *partitionOffsetManager) selectBroker() error {
+ if pom.broker != nil {
+ pom.parent.unrefBrokerOffsetManager(pom.broker)
+ pom.broker = nil
+ }
+
+ var broker *Broker
+ var err error
+
+ if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
+ return err
+ }
+
+ if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
+ return err
+ }
+
+ pom.broker = pom.parent.refBrokerOffsetManager(broker)
+ return nil
+}
+
+func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
+ request := new(OffsetFetchRequest)
+ request.Version = 1
+ request.ConsumerGroup = pom.parent.group
+ request.AddPartition(pom.topic, pom.partition)
+
+ response, err := pom.broker.broker.FetchOffset(request)
+ if err != nil {
+ return err
+ }
+
+ block := response.GetBlock(pom.topic, pom.partition)
+ if block == nil {
+ return ErrIncompleteResponse
+ }
+
+ switch block.Err {
+ case ErrNoError:
+ pom.offset = block.Offset
+ pom.metadata = block.Metadata
+ return nil
+ case ErrNotCoordinatorForConsumer:
+ if retries <= 0 {
+ return block.Err
+ }
+ if err := pom.selectBroker(); err != nil {
+ return err
+ }
+ return pom.fetchInitialOffset(retries - 1)
+ case ErrOffsetsLoadInProgress:
+ if retries <= 0 {
+ return block.Err
+ }
+ time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
+ return pom.fetchInitialOffset(retries - 1)
+ default:
+ return block.Err
+ }
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+ cErr := &ConsumerError{
+ Topic: pom.topic,
+ Partition: pom.partition,
+ Err: err,
+ }
+
+ if pom.parent.conf.Consumer.Return.Errors {
+ pom.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+ return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if offset > pom.offset {
+ pom.offset = offset
+ pom.metadata = metadata
+ pom.dirty = true
+ }
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset == offset && pom.metadata == metadata {
+ pom.dirty = false
+ pom.clean.Signal()
+ }
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset >= 0 {
+ return pom.offset, pom.metadata
+ }
+
+ return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+ go func() {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ for pom.dirty {
+ pom.clean.Wait()
+ }
+
+ close(pom.dying)
+ }()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+ pom.AsyncClose()
+
+ var errors ConsumerErrors
+ for err := range pom.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+// Broker Offset Manager
+
+type brokerOffsetManager struct {
+ parent *offsetManager
+ broker *Broker
+ timer *time.Ticker
+ updateSubscriptions chan *partitionOffsetManager
+ subscriptions map[*partitionOffsetManager]none
+ refs int
+}
+
+func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
+ bom := &brokerOffsetManager{
+ parent: om,
+ broker: broker,
+ timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
+ updateSubscriptions: make(chan *partitionOffsetManager),
+ subscriptions: make(map[*partitionOffsetManager]none),
+ }
+
+ go withRecover(bom.mainLoop)
+
+ return bom
+}
+
+func (bom *brokerOffsetManager) mainLoop() {
+ for {
+ select {
+ case <-bom.timer.C:
+ if len(bom.subscriptions) > 0 {
+ bom.flushToBroker()
+ }
+ case s, ok := <-bom.updateSubscriptions:
+ if !ok {
+ bom.timer.Stop()
+ return
+ }
+ if _, ok := bom.subscriptions[s]; ok {
+ delete(bom.subscriptions, s)
+ } else {
+ bom.subscriptions[s] = none{}
+ }
+ }
+ }
+}
+
+func (bom *brokerOffsetManager) flushToBroker() {
+ request := bom.constructRequest()
+ if request == nil {
+ return
+ }
+
+ response, err := bom.broker.CommitOffset(request)
+
+ if err != nil {
+ bom.abort(err)
+ return
+ }
+
+ for s := range bom.subscriptions {
+ if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
+ continue
+ }
+
+ var err KError
+ var ok bool
+
+ if response.Errors[s.topic] == nil {
+ s.handleError(ErrIncompleteResponse)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ continue
+ }
+ if err, ok = response.Errors[s.topic][s.partition]; !ok {
+ s.handleError(ErrIncompleteResponse)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ continue
+ }
+
+ switch err {
+ case ErrNoError:
+ block := request.blocks[s.topic][s.partition]
+ s.updateCommitted(block.offset, block.metadata)
+ case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+ ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
+ // not a critical error, we just need to redispatch
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
+ // nothing we can do about this, just tell the user and carry on
+ s.handleError(err)
+ case ErrOffsetsLoadInProgress:
+ // nothing wrong but we didn't commit, we'll get it next time round
+ break
+ case ErrUnknownTopicOrPartition:
+ // let the user know *and* try redispatching - if topic-auto-create is
+ // enabled, redispatching should trigger a metadata request and create the
+ // topic; if not then re-dispatching won't help, but we've let the user
+ // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
+ fallthrough
+ default:
+ // dunno, tell the user and try redispatching
+ s.handleError(err)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ }
+ }
+}
+
+func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
+ var r *OffsetCommitRequest
+ var perPartitionTimestamp int64
+ if bom.parent.conf.Consumer.Offsets.Retention == 0 {
+ perPartitionTimestamp = ReceiveTime
+ r = &OffsetCommitRequest{
+ Version: 1,
+ ConsumerGroup: bom.parent.group,
+ ConsumerGroupGeneration: GroupGenerationUndefined,
+ }
+ } else {
+ r = &OffsetCommitRequest{
+ Version: 2,
+ RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
+ ConsumerGroup: bom.parent.group,
+ ConsumerGroupGeneration: GroupGenerationUndefined,
+ }
+
+ }
+
+ for s := range bom.subscriptions {
+ s.lock.Lock()
+ if s.dirty {
+ r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
+ }
+ s.lock.Unlock()
+ }
+
+ if len(r.blocks) > 0 {
+ return r
+ }
+
+ return nil
+}
+
+func (bom *brokerOffsetManager) abort(err error) {
+ _ = bom.broker.Close() // we don't care about the error this might return, we already have one
+ bom.parent.abandonBroker(bom)
+
+ for pom := range bom.subscriptions {
+ pom.handleError(err)
+ pom.rebalance <- none{}
+ }
+
+ for s := range bom.updateSubscriptions {
+ if _, ok := bom.subscriptions[s]; !ok {
+ s.handleError(err)
+ s.rebalance <- none{}
+ }
+ }
+
+ bom.subscriptions = make(map[*partitionOffsetManager]none)
+}
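End to end, the offset manager is used roughly as follows. A sketch only: `NewClient` and `NewConfig` are assumed from the rest of the library, the group and topic are hypothetical, and error handling is compressed:

```go
// consumeWithOffsets resumes "my_topic"/0 from the last committed offset and
// marks progress as it goes; the deferred closes run in the required order
// (partition manager, then offset manager, then client).
func consumeWithOffsets(addrs []string) error {
	client, err := NewClient(addrs, NewConfig())
	if err != nil {
		return err
	}
	defer client.Close()

	om, err := NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		return err
	}
	defer om.Close()

	pom, err := om.ManagePartition("my_topic", 0)
	if err != nil {
		return err
	}
	defer pom.Close()

	offset, _ := pom.NextOffset() // where to resume (or the configured initial offset)
	// ... consume the message at `offset`, then mark the next offset to read:
	pom.MarkOffset(offset+1, "")
	return nil
}
```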
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
new file mode 100644
index 00000000..6c269601
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_request.go
@@ -0,0 +1,132 @@
+package sarama
+
+type offsetRequestBlock struct {
+ time int64
+ maxOffsets int32 // Only used in version 0
+}
+
+func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
+ pe.putInt64(int64(b.time))
+ if version == 0 {
+ pe.putInt32(b.maxOffsets)
+ }
+
+ return nil
+}
+
+func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ if b.time, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if version == 0 {
+ if b.maxOffsets, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type OffsetRequest struct {
+ Version int16
+ blocks map[string]map[int32]*offsetRequestBlock
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+ pe.putInt32(-1) // replica ID is always -1 for clients
+ err := pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
+ r.Version = version
+
+ // Ignore replica ID
+ if _, err := pd.getInt32(); err != nil {
+ return err
+ }
+ blockCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if blockCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ for i := 0; i < blockCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetRequestBlock{}
+ if err := block.decode(pd, version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+ return 2
+}
+
+func (r *OffsetRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_10_1_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ }
+
+ tmp := new(offsetRequestBlock)
+ tmp.time = time
+ if r.Version == 0 {
+ tmp.maxOffsets = maxOffsets
+ }
+
+ r.blocks[topic][partitionID] = tmp
+}
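Listing partition offsets uses the same AddBlock pattern. A short sketch, with `my_topic`/0 hypothetical and the sentinel time -1 meaning "newest offset":

```go
// newListOffsetsRequest builds a v0 request for the newest offset of
// "my_topic"/0; a maxOffsets of 1 keeps the reply to a single value.
func newListOffsetsRequest() *OffsetRequest {
	r := new(OffsetRequest) // Version 0 by default
	r.AddBlock("my_topic", 0, -1, 1)
	return r
}
```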
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
new file mode 100644
index 00000000..9a9cfe96
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_response.go
@@ -0,0 +1,174 @@
+package sarama
+
+type OffsetResponseBlock struct {
+ Err KError
+ Offsets []int64 // Version 0
+ Offset int64 // Version 1
+ Timestamp int64 // Version 1
+}
+
+func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ if version == 0 {
+ b.Offsets, err = pd.getInt64Array()
+
+ return err
+ }
+
+ b.Timestamp, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ // For backwards compatibility put the offset in the offsets array too
+ b.Offsets = []int64{b.Offset}
+
+ return nil
+}
+
+func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(int16(b.Err))
+
+ if version == 0 {
+ return pe.putInt64Array(b.Offsets)
+ }
+
+ pe.putInt64(b.Timestamp)
+ pe.putInt64(b.Offset)
+
+ return nil
+}
+
+type OffsetResponse struct {
+ Version int16
+ Blocks map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+ if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe, r.version()); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) key() int16 {
+ return 2
+}
+
+func (r *OffsetResponse) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_10_1_0
+ default:
+ return minVersion
+ }
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*OffsetResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
+}
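
The response side, sketched here with the testing helper above; `sarama.ErrNoError` is assumed to be the zero `KError` from the library's error list, which is not part of this diff.

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Normally this comes back from a broker; here it is built with the testing helper.
	resp := &sarama.OffsetResponse{Version: 1}
	resp.AddTopicPartition("my_topic", 0, 42)

	if block := resp.GetBlock("my_topic", 0); block != nil && block.Err == sarama.ErrNoError {
		fmt.Println("latest offset:", block.Offset)
	}
}
```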
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
new file mode 100644
index 00000000..28670c0e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -0,0 +1,45 @@
+package sarama
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+ // Primitives
+ getInt8() (int8, error)
+ getInt16() (int16, error)
+ getInt32() (int32, error)
+ getInt64() (int64, error)
+ getArrayLength() (int, error)
+
+ // Collections
+ getBytes() ([]byte, error)
+ getString() (string, error)
+ getInt32Array() ([]int32, error)
+ getInt64Array() ([]int64, error)
+ getStringArray() ([]string, error)
+
+ // Subsets
+ remaining() int
+ getSubset(length int) (packetDecoder, error)
+
+ // Stacks, see PushDecoder
+ push(in pushDecoder) error
+ pop() error
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+ // Saves the offset into the input buffer as the location to actually read the calculated value when able.
+ saveOffset(in int)
+
+	// Returns the length of data to reserve for the input of this decoder (e.g. 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and check the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+ // of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+ check(curOffset int, buf []byte) error
+}
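
To make the push/pop decoding stack concrete, a `pushDecoder` that validates a 4-byte length prefix might look roughly like the sketch below. It is illustrative only, not the library's actual implementation, and would have to live inside package sarama because the interfaces are unexported.

```go
package sarama // sketch only: pushDecoder is unexported, so this lives in-package

import (
	"encoding/binary"
	"fmt"
)

// lengthCheck records where a 4-byte length field sits and, on pop(), verifies
// that it matches the number of bytes actually decoded after it.
type lengthCheck struct {
	startOffset int
}

func (l *lengthCheck) saveOffset(in int) { l.startOffset = in }

func (l *lengthCheck) reserveLength() int { return 4 }

func (l *lengthCheck) check(curOffset int, buf []byte) error {
	want := int32(binary.BigEndian.Uint32(buf[l.startOffset:]))
	if got := int32(curOffset - l.startOffset - 4); got != want {
		return PacketDecodingError{fmt.Sprintf("length field was %d but %d bytes followed", want, got)}
	}
	return nil
}
```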
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 00000000..27a10f6d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,50 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+ // Primitives
+ putInt8(in int8)
+ putInt16(in int16)
+ putInt32(in int32)
+ putInt64(in int64)
+ putArrayLength(in int) error
+
+ // Collections
+ putBytes(in []byte) error
+ putRawBytes(in []byte) error
+ putString(in string) error
+ putStringArray(in []string) error
+ putInt32Array(in []int32) error
+ putInt64Array(in []int64) error
+
+ // Provide the current offset to record the batch size metric
+ offset() int
+
+ // Stacks, see PushEncoder
+ push(in pushEncoder)
+ pop() error
+
+ // To record metrics when provided
+ metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+ // Saves the offset into the input buffer as the location to actually write the calculated value when able.
+ saveOffset(in int)
+
+	// Returns the length of data to reserve for the output of this encoder (e.g. 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and write the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+ // of data to the saved offset, based on the data between the saved offset and curOffset.
+ run(curOffset int, buf []byte) error
+}
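
The encoder-side mirror of the length check sketched above: a rough, illustrative length-prefix `pushEncoder`, similar in spirit to the `lengthField` that produce_request.go and request.go push before encoding their payloads (that type lives in a file not included in this diff).

```go
package sarama // sketch only: pushEncoder is unexported, so this lives in-package

import "encoding/binary"

// lengthPrefix reserves 4 bytes up front and later back-fills them with the
// number of bytes written after the reserved slot.
type lengthPrefix struct {
	startOffset int
}

func (l *lengthPrefix) saveOffset(in int) { l.startOffset = in }

func (l *lengthPrefix) reserveLength() int { return 4 }

// run is called on pop(), once everything after the field has been written.
func (l *lengthPrefix) run(curOffset int, buf []byte) error {
	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
	return nil
}
```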
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 00000000..97293272
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,135 @@
+package sarama
+
+import (
+ "hash"
+ "hash/fnv"
+ "math/rand"
+ "time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+ // Partition takes a message and partition count and chooses a partition
+ Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+ // RequiresConsistency indicates to the user of the partitioner whether the
+ // mapping of key->partition is consistent or not. Specifically, if a
+ // partitioner requires consistency then it must be allowed to choose from all
+ // partitions (even ones known to be unavailable), and its choice must be
+ // respected by the caller. The obvious example is the HashPartitioner.
+ RequiresConsistency() bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+ return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+ return true
+}
+
+type randomPartitioner struct {
+ generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+ p := new(randomPartitioner)
+ p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+ return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type roundRobinPartitioner struct {
+ partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+ return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if p.partition >= numPartitions {
+ p.partition = 0
+ }
+ ret := p.partition
+ p.partition++
+ return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type hashPartitioner struct {
+ random Partitioner
+ hasher hash.Hash32
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
+// The argument is a factory function returning a hash.Hash32 instance. This ensures that
+// each partition dispatcher gets its own hasher, avoiding the concurrency issues of sharing one instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+ return func(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = hasher()
+ return p
+ }
+}
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulo the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = fnv.New32a()
+ return p
+}
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if message.Key == nil {
+ return p.random.Partition(message, numPartitions)
+ }
+ bytes, err := message.Key.Encode()
+ if err != nil {
+ return -1, err
+ }
+ p.hasher.Reset()
+ _, err = p.hasher.Write(bytes)
+ if err != nil {
+ return -1, err
+ }
+ partition := int32(p.hasher.Sum32()) % numPartitions
+ if partition < 0 {
+ partition = -partition
+ }
+ return partition, nil
+}
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+ return true
+}
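
Callers normally pick a partitioner through the producer configuration rather than constructing one directly. The sketch below assumes the usual `Config.Producer.Partitioner` field (a `PartitionerConstructor`, defined in config.go, which is not part of this diff).

```go
package main

import (
	"hash/fnv"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()

	// Key-hashed partitioning with the default FNV-1a hasher...
	cfg.Producer.Partitioner = sarama.NewHashPartitioner

	// ...or the same behaviour with an explicitly supplied hash.Hash32 factory.
	cfg.Producer.Partitioner = sarama.NewCustomHashPartitioner(fnv.New32a)

	_ = cfg
}
```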
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
new file mode 100644
index 00000000..fd5ea0f9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/prep_encoder.go
@@ -0,0 +1,121 @@
+package sarama
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type prepEncoder struct {
+ length int
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+ pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+ pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+ pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+ pe.length += 8
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+ if in > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+ }
+ pe.length += 4
+ return nil
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+ pe.length += 4
+ if in == nil {
+ return nil
+ }
+ if len(in) > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+ if len(in) > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putString(in string) error {
+ pe.length += 2
+ if len(in) > math.MaxInt16 {
+ return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, str := range in {
+ if err := pe.putString(str); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 4 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 8 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) offset() int {
+ return pe.length
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+ pe.length += in.reserveLength()
+}
+
+func (pe *prepEncoder) pop() error {
+ return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+ return nil
+}
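
The prepEncoder is the first half of sarama's two-pass encoding: it only counts bytes so that a buffer of exactly the right size can be allocated before the realEncoder (later in this diff) does the actual writing. The package-level `encode` helper used in produce_set.go and sync_group_request.go behaves roughly like the sketch below; names and details are illustrative, not the actual implementation.

```go
package sarama // sketch: the encoder interface and both encoders are unexported

import "github.com/rcrowley/go-metrics"

// encodeSketch sizes the object with a prepEncoder, then writes it with a
// realEncoder into a buffer of exactly that size.
func encodeSketch(e encoder, registry metrics.Registry) ([]byte, error) {
	prep := &prepEncoder{}
	if err := e.encode(prep); err != nil {
		return nil, err
	}

	buf := make([]byte, prep.length)
	re := &realEncoder{raw: buf, registry: registry}
	if err := e.encode(re); err != nil {
		return nil, err
	}
	return buf, nil
}
```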
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
new file mode 100644
index 00000000..40dc8015
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_request.go
@@ -0,0 +1,209 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by setting the `min.insync.replicas` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+	// NoResponse doesn't send any response; the TCP ACK is all you get.
+ NoResponse RequiredAcks = 0
+ // WaitForLocal waits for only the local commit to succeed before responding.
+ WaitForLocal RequiredAcks = 1
+ // WaitForAll waits for all in-sync replicas to commit before responding.
+ // The minimum number of in-sync replicas is configured on the broker via
+ // the `min.insync.replicas` configuration key.
+ WaitForAll RequiredAcks = -1
+)
+
+type ProduceRequest struct {
+ RequiredAcks RequiredAcks
+ Timeout int32
+ Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10
+ msgSets map[string]map[int32]*MessageSet
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.RequiredAcks))
+ pe.putInt32(r.Timeout)
+ err := pe.putArrayLength(len(r.msgSets))
+ if err != nil {
+ return err
+ }
+ metricRegistry := pe.metricRegistry()
+ var batchSizeMetric metrics.Histogram
+ var compressionRatioMetric metrics.Histogram
+ if metricRegistry != nil {
+ batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+ compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+ }
+
+ totalRecordCount := int64(0)
+ for topic, partitions := range r.msgSets {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ topicRecordCount := int64(0)
+ var topicCompressionRatioMetric metrics.Histogram
+ if metricRegistry != nil {
+ topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+ }
+ for id, msgSet := range partitions {
+ startOffset := pe.offset()
+ pe.putInt32(id)
+ pe.push(&lengthField{})
+ err = msgSet.encode(pe)
+ if err != nil {
+ return err
+ }
+ err = pe.pop()
+ if err != nil {
+ return err
+ }
+ if metricRegistry != nil {
+ for _, messageBlock := range msgSet.Messages {
+ // Is this a fake "message" wrapping real messages?
+ if messageBlock.Msg.Set != nil {
+ topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+ } else {
+ // A single uncompressed message
+ topicRecordCount++
+ }
+					// Better safe than sorry when computing the compression ratio
+ if messageBlock.Msg.compressedSize != 0 {
+ compressionRatio := float64(len(messageBlock.Msg.Value)) /
+ float64(messageBlock.Msg.compressedSize)
+						// Histograms do not support decimal values, so multiply by 100 for better precision
+ intCompressionRatio := int64(100 * compressionRatio)
+ compressionRatioMetric.Update(intCompressionRatio)
+ topicCompressionRatioMetric.Update(intCompressionRatio)
+ }
+ }
+ batchSize := int64(pe.offset() - startOffset)
+ batchSizeMetric.Update(batchSize)
+ getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+ }
+ }
+ if topicRecordCount > 0 {
+ getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+ getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+ totalRecordCount += topicRecordCount
+ }
+ }
+ if totalRecordCount > 0 {
+ metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+ getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+ }
+
+ return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+ requiredAcks, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.RequiredAcks = RequiredAcks(requiredAcks)
+ if r.Timeout, err = pd.getInt32(); err != nil {
+ return err
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.msgSets = make(map[string]map[int32]*MessageSet)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.msgSets[topic] = make(map[int32]*MessageSet)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ messageSetSize, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ msgSetDecoder, err := pd.getSubset(int(messageSetSize))
+ if err != nil {
+ return err
+ }
+ msgSet := &MessageSet{}
+ err = msgSet.decode(msgSetDecoder)
+ if err != nil {
+ return err
+ }
+ r.msgSets[topic][partition] = msgSet
+ }
+ }
+ return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+ return 0
+}
+
+func (r *ProduceRequest) version() int16 {
+ return r.Version
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+ if r.msgSets == nil {
+ r.msgSets = make(map[string]map[int32]*MessageSet)
+ }
+
+ if r.msgSets[topic] == nil {
+ r.msgSets[topic] = make(map[int32]*MessageSet)
+ }
+
+ set := r.msgSets[topic][partition]
+
+ if set == nil {
+ set = new(MessageSet)
+ r.msgSets[topic][partition] = set
+ }
+
+ set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+ if r.msgSets == nil {
+ r.msgSets = make(map[string]map[int32]*MessageSet)
+ }
+
+ if r.msgSets[topic] == nil {
+ r.msgSets[topic] = make(map[int32]*MessageSet)
+ }
+
+ r.msgSets[topic][partition] = set
+}
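
A minimal illustration of this request's public surface; in normal use the async producer builds these via produceSet.buildRequest (see produce_set.go below) rather than application code.

```go
package main

import "github.com/Shopify/sarama"

func main() {
	req := &sarama.ProduceRequest{
		RequiredAcks: sarama.WaitForAll, // wait for all in-sync replicas
		Timeout:      10000,             // broker-side timeout, in milliseconds
	}

	req.AddMessage("my_topic", 0, &sarama.Message{
		Codec: sarama.CompressionNone,
		Value: []byte("hello"),
	})

	_ = req // sent through a *sarama.Broker by the producer internals
}
```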
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
new file mode 100644
index 00000000..3f05dd9f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_response.go
@@ -0,0 +1,159 @@
+package sarama
+
+import "time"
+
+type ProduceResponseBlock struct {
+ Err KError
+ Offset int64
+ // only provided if Version >= 2 and the broker is configured with `LogAppendTime`
+ Timestamp time.Time
+}
+
+func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 2 {
+ if millis, err := pd.getInt64(); err != nil {
+ return err
+ } else if millis != -1 {
+ b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+ }
+ }
+
+ return nil
+}
+
+type ProduceResponse struct {
+ Blocks map[string]map[int32]*ProduceResponseBlock
+ Version int16
+ ThrottleTime time.Duration // only provided if Version >= 1
+}
+
+func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(ProduceResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ if r.Version >= 1 {
+ millis, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ r.ThrottleTime = time.Duration(millis) * time.Millisecond
+ }
+
+ return nil
+}
+
+func (r *ProduceResponse) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for id, prb := range partitions {
+ pe.putInt32(id)
+ pe.putInt16(int16(prb.Err))
+ pe.putInt64(prb.Offset)
+ }
+ }
+ if r.Version >= 1 {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+ }
+ return nil
+}
+
+func (r *ProduceResponse) key() int16 {
+ return 0
+}
+
+func (r *ProduceResponse) version() int16 {
+ return r.Version
+}
+
+func (r *ProduceResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+// Testing API
+
+func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*ProduceResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ byTopic[partition] = &ProduceResponseBlock{Err: err}
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
new file mode 100644
index 00000000..158d9c47
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -0,0 +1,176 @@
+package sarama
+
+import "time"
+
+type partitionSet struct {
+ msgs []*ProducerMessage
+ setToSend *MessageSet
+ bufferBytes int
+}
+
+type produceSet struct {
+ parent *asyncProducer
+ msgs map[string]map[int32]*partitionSet
+
+ bufferBytes int
+ bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+ return &produceSet{
+ msgs: make(map[string]map[int32]*partitionSet),
+ parent: parent,
+ }
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+ var err error
+ var key, val []byte
+
+ if msg.Key != nil {
+ if key, err = msg.Key.Encode(); err != nil {
+ return err
+ }
+ }
+
+ if msg.Value != nil {
+ if val, err = msg.Value.Encode(); err != nil {
+ return err
+ }
+ }
+
+ partitions := ps.msgs[msg.Topic]
+ if partitions == nil {
+ partitions = make(map[int32]*partitionSet)
+ ps.msgs[msg.Topic] = partitions
+ }
+
+ set := partitions[msg.Partition]
+ if set == nil {
+ set = &partitionSet{setToSend: new(MessageSet)}
+ partitions[msg.Partition] = set
+ }
+
+ set.msgs = append(set.msgs, msg)
+ msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ if msg.Timestamp.IsZero() {
+ msgToSend.Timestamp = time.Now()
+ } else {
+ msgToSend.Timestamp = msg.Timestamp
+ }
+ msgToSend.Version = 1
+ }
+ set.setToSend.addMessage(msgToSend)
+
+ size := producerMessageOverhead + len(key) + len(val)
+ set.bufferBytes += size
+ ps.bufferBytes += size
+ ps.bufferCount++
+
+ return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+ req := &ProduceRequest{
+ RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+ Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ req.Version = 2
+ }
+
+ for topic, partitionSet := range ps.msgs {
+ for partition, set := range partitionSet {
+ if ps.parent.conf.Producer.Compression == CompressionNone {
+ req.AddSet(topic, partition, set.setToSend)
+ } else {
+ // When compression is enabled, the entire set for each partition is compressed
+ // and sent as the payload of a single fake "message" with the appropriate codec
+ // set and no key. When the server sees a message with a compression codec, it
+ // decompresses the payload and treats the result as its message set.
+ payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry)
+ if err != nil {
+ Logger.Println(err) // if this happens, it's basically our fault.
+ panic(err)
+ }
+ compMsg := &Message{
+ Codec: ps.parent.conf.Producer.Compression,
+ Key: nil,
+ Value: payload,
+ Set: set.setToSend, // Provide the underlying message set for accurate metrics
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ compMsg.Version = 1
+ compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp
+ }
+ req.AddMessage(topic, partition, compMsg)
+ }
+ }
+ }
+
+ return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
+ for topic, partitionSet := range ps.msgs {
+ for partition, set := range partitionSet {
+ cb(topic, partition, set.msgs)
+ }
+ }
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+ if ps.msgs[topic] == nil {
+ return nil
+ }
+ set := ps.msgs[topic][partition]
+ if set == nil {
+ return nil
+ }
+ ps.bufferBytes -= set.bufferBytes
+ ps.bufferCount -= len(set.msgs)
+ delete(ps.msgs[topic], partition)
+ return set.msgs
+}
+
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+ switch {
+ // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+ case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
+ return true
+ // Would we overflow the size-limit of a compressed message-batch for this partition?
+ case ps.parent.conf.Producer.Compression != CompressionNone &&
+ ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+ ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
+ return true
+ // Would we overflow simply in number of messages?
+ case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) readyToFlush() bool {
+ switch {
+ // If we don't have any messages, nothing else matters
+ case ps.empty():
+ return false
+ // If all three config values are 0, we always flush as-fast-as-possible
+ case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+ return true
+ // If we've passed the message trigger-point
+ case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+ return true
+ // If we've passed the byte trigger-point
+ case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) empty() bool {
+ return ps.bufferCount == 0
+}
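
The flush thresholds checked in `readyToFlush` and `wouldOverflow` come from the producer configuration (config.go, not part of this diff). A hedged example of tuning them:

```go
package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()

	// Messages and Bytes feed directly into readyToFlush(); Frequency is
	// assumed to be enforced by a timer elsewhere in the async producer.
	// Leaving all three at zero means "flush as fast as possible".
	cfg.Producer.Flush.Messages = 100                     // flush once 100 messages are buffered
	cfg.Producer.Flush.Bytes = 64 * 1024                  // ...or once 64 KiB of payload is buffered
	cfg.Producer.Flush.Frequency = 500 * time.Millisecond // ...or at least every 500ms

	// Hard cap per batch, checked by wouldOverflow().
	cfg.Producer.Flush.MaxMessages = 1000

	_ = cfg
}
```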
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
new file mode 100644
index 00000000..3cf93533
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_decoder.go
@@ -0,0 +1,260 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "math"
+)
+
+var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
+var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
+var errInvalidStringLength = PacketDecodingError{"invalid string length"}
+var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
+
+type realDecoder struct {
+ raw []byte
+ off int
+ stack []pushDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+ if rd.remaining() < 1 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int8(rd.raw[rd.off])
+ rd.off++
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+ if rd.remaining() < 2 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+ rd.off += 2
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+ if rd.remaining() < 8 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ if tmp > rd.remaining() {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ } else if tmp > 2*math.MaxUint16 {
+ return -1, errInvalidArrayLength
+ }
+ return tmp, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+ tmp, err := rd.getInt32()
+
+ if err != nil {
+ return nil, err
+ }
+
+ n := int(tmp)
+
+ switch {
+ case n < -1:
+ return nil, errInvalidByteSliceLength
+ case n == -1:
+ return nil, nil
+ case n == 0:
+ return make([]byte, 0), nil
+ case n > rd.remaining():
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ tmpStr := rd.raw[rd.off : rd.off+n]
+ rd.off += n
+ return tmpStr, nil
+}
+
+func (rd *realDecoder) getString() (string, error) {
+ tmp, err := rd.getInt16()
+
+ if err != nil {
+ return "", err
+ }
+
+ n := int(tmp)
+
+ switch {
+ case n < -1:
+ return "", errInvalidStringLength
+ case n == -1:
+ return "", nil
+ case n == 0:
+ return "", nil
+ case n > rd.remaining():
+ rd.off = len(rd.raw)
+ return "", ErrInsufficientData
+ }
+
+ tmpStr := string(rd.raw[rd.off : rd.off+n])
+ rd.off += n
+ return tmpStr, nil
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if rd.remaining() < 4*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]int32, n)
+ for i := range ret {
+ ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if rd.remaining() < 8*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]int64, n)
+ for i := range ret {
+ ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]string, n)
+ for i := range ret {
+ str, err := rd.getString()
+ if err != nil {
+ return nil, err
+ }
+
+ ret[i] = str
+ }
+ return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+ return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+ if length < 0 {
+ return nil, errInvalidSubsetSize
+ } else if length > rd.remaining() {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ start := rd.off
+ rd.off += length
+ return &realDecoder{raw: rd.raw[start:rd.off]}, nil
+}
+
+// stacks
+
+func (rd *realDecoder) push(in pushDecoder) error {
+ in.saveOffset(rd.off)
+
+ reserve := in.reserveLength()
+ if rd.remaining() < reserve {
+ rd.off = len(rd.raw)
+ return ErrInsufficientData
+ }
+
+ rd.stack = append(rd.stack, in)
+
+ rd.off += reserve
+
+ return nil
+}
+
+func (rd *realDecoder) pop() error {
+	// this is Go's ugly pop pattern (the inverse of append)
+ in := rd.stack[len(rd.stack)-1]
+ rd.stack = rd.stack[:len(rd.stack)-1]
+
+ return in.check(rd.off, rd.raw)
+}
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
new file mode 100644
index 00000000..ced4267c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_encoder.go
@@ -0,0 +1,129 @@
+package sarama
+
+import (
+ "encoding/binary"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type realEncoder struct {
+ raw []byte
+ off int
+ stack []pushEncoder
+ registry metrics.Registry
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+ re.raw[re.off] = byte(in)
+ re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+ binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+ re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+ binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+ re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+ binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+ re.off += 8
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+ re.putInt32(int32(in))
+ return nil
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+ if in == nil {
+ re.putInt32(-1)
+ return nil
+ }
+ re.putInt32(int32(len(in)))
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putString(in string) error {
+ re.putInt16(int16(len(in)))
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, val := range in {
+ if err := re.putString(val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt32(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt64(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) offset() int {
+ return re.off
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+ in.saveOffset(re.off)
+ re.off += in.reserveLength()
+ re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+	// this is Go's ugly pop pattern (the inverse of append)
+ in := re.stack[len(re.stack)-1]
+ re.stack = re.stack[:len(re.stack)-1]
+
+ return in.run(re.off, re.raw)
+}
+
+// we do record metrics during the real encoder pass
+func (re *realEncoder) metricRegistry() metrics.Registry {
+ return re.registry
+}
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
new file mode 100644
index 00000000..73310ca8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -0,0 +1,119 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+type protocolBody interface {
+ encoder
+ versionedDecoder
+ key() int16
+ version() int16
+ requiredVersion() KafkaVersion
+}
+
+type request struct {
+ correlationID int32
+ clientID string
+ body protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) (err error) {
+ pe.push(&lengthField{})
+ pe.putInt16(r.body.key())
+ pe.putInt16(r.body.version())
+ pe.putInt32(r.correlationID)
+ err = pe.putString(r.clientID)
+ if err != nil {
+ return err
+ }
+ err = r.body.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+ var key int16
+ if key, err = pd.getInt16(); err != nil {
+ return err
+ }
+ var version int16
+ if version, err = pd.getInt16(); err != nil {
+ return err
+ }
+ if r.correlationID, err = pd.getInt32(); err != nil {
+ return err
+ }
+	if r.clientID, err = pd.getString(); err != nil {
+		return err
+	}
+
+ r.body = allocateBody(key, version)
+ if r.body == nil {
+ return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+ }
+ return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
+ lengthBytes := make([]byte, 4)
+ if _, err := io.ReadFull(r, lengthBytes); err != nil {
+ return nil, bytesRead, err
+ }
+ bytesRead += len(lengthBytes)
+
+ length := int32(binary.BigEndian.Uint32(lengthBytes))
+ if length <= 4 || length > MaxRequestSize {
+ return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+ }
+
+ encodedReq := make([]byte, length)
+ if _, err := io.ReadFull(r, encodedReq); err != nil {
+ return nil, bytesRead, err
+ }
+ bytesRead += len(encodedReq)
+
+ req = &request{}
+ if err := decode(encodedReq, req); err != nil {
+ return nil, bytesRead, err
+ }
+ return req, bytesRead, nil
+}
+
+func allocateBody(key, version int16) protocolBody {
+ switch key {
+ case 0:
+ return &ProduceRequest{}
+ case 1:
+ return &FetchRequest{}
+ case 2:
+ return &OffsetRequest{Version: version}
+ case 3:
+ return &MetadataRequest{}
+ case 8:
+ return &OffsetCommitRequest{Version: version}
+ case 9:
+ return &OffsetFetchRequest{}
+ case 10:
+ return &ConsumerMetadataRequest{}
+ case 11:
+ return &JoinGroupRequest{}
+ case 12:
+ return &HeartbeatRequest{}
+ case 13:
+ return &LeaveGroupRequest{}
+ case 14:
+ return &SyncGroupRequest{}
+ case 15:
+ return &DescribeGroupsRequest{}
+ case 16:
+ return &ListGroupsRequest{}
+ case 17:
+ return &SaslHandshakeRequest{}
+ case 18:
+ return &ApiVersionsRequest{}
+ }
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
new file mode 100644
index 00000000..f3f4d27d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+ length int32
+ correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder) (err error) {
+ r.length, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if r.length <= 4 || r.length > MaxResponseSize {
+ return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+ }
+
+ r.correlationID, err = pd.getInt32()
+ return err
+}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
new file mode 100644
index 00000000..7d5dc60d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -0,0 +1,99 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
+
+To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
+consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
+https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
+and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through the https://github.com/rcrowley/go-metrics library in a local registry.
+
+Broker related metrics:
+
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+ | Name                                         | Type       | Description                                                   |
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+ | incoming-byte-rate                           | meter      | Bytes/second read off all brokers                             |
+ | incoming-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second read off a given broker                          |
+ | outgoing-byte-rate                           | meter      | Bytes/second written to all brokers                           |
+ | outgoing-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second written to a given broker                        |
+ | request-rate                                 | meter      | Requests/second sent to all brokers                           |
+ | request-rate-for-broker-<broker-id>          | meter      | Requests/second sent to a given broker                        |
+ | request-size                                 | histogram  | Distribution of the request size in bytes for all brokers     |
+ | request-size-for-broker-<broker-id>          | histogram  | Distribution of the request size in bytes for a given broker  |
+ | request-latency-in-ms                        | histogram  | Distribution of the request latency in ms for all brokers     |
+ | request-latency-in-ms-for-broker-<broker-id> | histogram  | Distribution of the request latency in ms for a given broker  |
+ | response-rate                                | meter      | Responses/second received from all brokers                    |
+ | response-rate-for-broker-<broker-id>         | meter      | Responses/second received from a given broker                 |
+ | response-size                                | histogram  | Distribution of the response size in bytes for all brokers    |
+ | response-size-for-broker-<broker-id>         | histogram  | Distribution of the response size in bytes for a given broker |
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
+
+Producer related metrics:
+
+ +---------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | Name                                  | Type       | Description                                                                          |
+ +---------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | batch-size                            | histogram  | Distribution of the number of bytes sent per partition per request for all topics    |
+ | batch-size-for-topic-<topic>          | histogram  | Distribution of the number of bytes sent per partition per request for a given topic |
+ | record-send-rate                      | meter      | Records/second sent to all topics                                                    |
+ | record-send-rate-for-topic-<topic>    | meter      | Records/second sent to a given topic                                                 |
+ | records-per-request                   | histogram  | Distribution of the number of records sent per request for all topics                |
+ | records-per-request-for-topic-<topic> | histogram  | Distribution of the number of records sent per request for a given topic             |
+ | compression-ratio                     | histogram  | Distribution of the compression ratio times 100 of record batches for all topics     |
+ | compression-ratio-for-topic-<topic>   | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
+ +---------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+*/
+package sarama
+
+import (
+ "io/ioutil"
+ "log"
+)
+
+// Logger is the instance of a StdLogger interface that Sarama writes connection
+// management events to. By default it is set to discard all log messages via ioutil.Discard,
+// but you can set it to redirect wherever you want.
+var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+ Println(v ...interface{})
+}
+
+// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+var PanicHandler func(interface{})
+
+// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+// to process.
+var MaxRequestSize int32 = 100 * 1024 * 1024
+
+// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+var MaxResponseSize int32 = 100 * 1024 * 1024
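
The package-level knobs above are plain variables, so enabling logging or tightening the size guards is just assignment:

```go
package main

import (
	"log"
	"os"

	"github.com/Shopify/sarama"
)

func main() {
	// Redirect Sarama's connection-management logging instead of discarding it.
	sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)

	// Tighten the request/response size guards if 100 MiB is far more than
	// this application ever expects to see.
	sarama.MaxRequestSize = 10 * 1024 * 1024
	sarama.MaxResponseSize = 10 * 1024 * 1024
}
```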
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
new file mode 100644
index 00000000..fbbc8947
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -0,0 +1,33 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+ Mechanism string
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.Mechanism); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.Mechanism, err = pd.getString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+ return 17
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+ return 0
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
new file mode 100644
index 00000000..ef290d4b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -0,0 +1,38 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+ Err KError
+ EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+ return 17
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+ return 0
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
new file mode 100644
index 00000000..fe207080
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type SyncGroupRequest struct {
+ GroupId string
+ GenerationId int32
+ MemberId string
+ GroupAssignments map[string][]byte
+}
+
+func (r *SyncGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
+ return err
+ }
+ for memberId, memberAssignment := range r.GroupAssignments {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+ if err := pe.putBytes(memberAssignment); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupAssignments = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ memberAssignment, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.GroupAssignments[memberId] = memberAssignment
+ }
+
+ return nil
+}
+
+func (r *SyncGroupRequest) key() int16 {
+ return 14
+}
+
+func (r *SyncGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+ if r.GroupAssignments == nil {
+ r.GroupAssignments = make(map[string][]byte)
+ }
+
+ r.GroupAssignments[memberId] = memberAssignment
+}
+
+func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
+ bin, err := encode(memberAssignment, nil)
+ if err != nil {
+ return err
+ }
+
+ r.AddGroupAssignment(memberId, bin)
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
new file mode 100644
index 00000000..194b382b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_response.go
@@ -0,0 +1,41 @@
+package sarama
+
+type SyncGroupResponse struct {
+ Err KError
+ MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+ assignment := new(ConsumerGroupMemberAssignment)
+ err := decode(r.MemberAssignment, assignment)
+ return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return pe.putBytes(r.MemberAssignment)
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ r.MemberAssignment, err = pd.getBytes()
+ return
+}
+
+func (r *SyncGroupResponse) key() int16 {
+ return 14
+}
+
+func (r *SyncGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
new file mode 100644
index 00000000..dd096b6d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_producer.go
@@ -0,0 +1,164 @@
+package sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks: it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+ // SendMessage produces a given message, and returns only when it either has
+ // succeeded or failed to produce. It will return the partition and the offset
+ // of the produced message, or an error if the message failed to produce.
+ SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+ // SendMessages produces a given set of messages, and returns only when all
+ // messages in the set have either succeeded or failed. Note that messages
+ // can succeed and fail individually; if some succeed and some fail,
+ // SendMessages will return an error.
+ SendMessages(msgs []*ProducerMessage) error
+
+ // Close shuts down the producer and waits for any buffered messages to be
+ // flushed. You must call this function before a producer object passes out of
+ // scope, as it may otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+}
+
+type syncProducer struct {
+ producer *asyncProducer
+ wg sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+ if config == nil {
+ config = NewConfig()
+ config.Producer.Return.Successes = true
+ }
+
+ if err := verifyProducerConfig(config); err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducer(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+ if err := verifyProducerConfig(client.Config()); err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+ sp := &syncProducer{producer: p}
+
+ sp.wg.Add(2)
+ go withRecover(sp.handleSuccesses)
+ go withRecover(sp.handleErrors)
+
+ return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+ if !config.Producer.Return.Errors {
+ return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+ }
+ if !config.Producer.Return.Successes {
+ return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+ }
+ return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+ oldMetadata := msg.Metadata
+ defer func() {
+ msg.Metadata = oldMetadata
+ }()
+
+ expectation := make(chan *ProducerError, 1)
+ msg.Metadata = expectation
+ sp.producer.Input() <- msg
+
+ if err := <-expectation; err != nil {
+ return -1, -1, err.Err
+ }
+
+ return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+ savedMetadata := make([]interface{}, len(msgs))
+ for i := range msgs {
+ savedMetadata[i] = msgs[i].Metadata
+ }
+ defer func() {
+ for i := range msgs {
+ msgs[i].Metadata = savedMetadata[i]
+ }
+ }()
+
+ expectations := make(chan chan *ProducerError, len(msgs))
+ go func() {
+ for _, msg := range msgs {
+ expectation := make(chan *ProducerError, 1)
+ msg.Metadata = expectation
+ sp.producer.Input() <- msg
+ expectations <- expectation
+ }
+ close(expectations)
+ }()
+
+ var errors ProducerErrors
+ for expectation := range expectations {
+ if err := <-expectation; err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+ defer sp.wg.Done()
+ for msg := range sp.producer.Successes() {
+ expectation := msg.Metadata.(chan *ProducerError)
+ expectation <- nil
+ }
+}
+
+func (sp *syncProducer) handleErrors() {
+ defer sp.wg.Done()
+ for err := range sp.producer.Errors() {
+ expectation := err.Msg.Metadata.(chan *ProducerError)
+ expectation <- err
+ }
+}
+
+func (sp *syncProducer) Close() error {
+ sp.producer.AsyncClose()
+ sp.wg.Wait()
+ return nil
+}
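
A minimal usage sketch for the vendored `SyncProducer` above (editorial illustration, not part of the vendored file): the broker address and topic name are placeholders, and the two `Return` settings mirror what `verifyProducerConfig` requires when a non-nil config is supplied.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// verifyProducerConfig requires both of these when a config is passed in.
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true

	// "localhost:9092" and "example-topic" are placeholders.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close() // flushes buffered messages before returning

	msg := &sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	}
	partition, offset, err := producer.SendMessage(msg)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}
```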
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
new file mode 100644
index 00000000..d36db921
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -0,0 +1,153 @@
+package sarama
+
+import (
+ "bufio"
+ "net"
+ "sort"
+)
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+ return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+ return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+func dupeAndSort(input []int32) []int32 {
+ ret := make([]int32, 0, len(input))
+ for _, val := range input {
+ ret = append(ret, val)
+ }
+
+ sort.Sort(int32Slice(ret))
+ return ret
+}
+
+func withRecover(fn func()) {
+ defer func() {
+ handler := PanicHandler
+ if handler != nil {
+ if err := recover(); err != nil {
+ handler(err)
+ }
+ }
+ }()
+
+ fn()
+}
+
+func safeAsyncClose(b *Broker) {
+ tmp := b // local var prevents clobbering in goroutine
+ go withRecover(func() {
+ if connected, _ := tmp.Connected(); connected {
+ if err := tmp.Close(); err != nil {
+ Logger.Println("Error closing broker", tmp.ID(), ":", err)
+ }
+ }
+ })
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+ Encode() ([]byte, error)
+ Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+ return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+ return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+ return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+ return len(b)
+}
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+ net.Conn
+ buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+ return &bufConn{
+ Conn: conn,
+ buf: bufio.NewReader(conn),
+ }
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+ return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+ // it's a struct rather than just typing the array directly to make it opaque and stop people
+ // generating their own arbitrary versions
+ version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+ return KafkaVersion{
+ version: [4]uint{major, minor, veryMinor, patch},
+ }
+}
+
+// IsAtLeast return true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+// V1.IsAtLeast(V2) // false
+// V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+ for i := range v.version {
+ if v.version[i] > other.version[i] {
+ return true
+ } else if v.version[i] < other.version[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Effective constants defining the supported kafka versions.
+var (
+ V0_8_2_0 = newKafkaVersion(0, 8, 2, 0)
+ V0_8_2_1 = newKafkaVersion(0, 8, 2, 1)
+ V0_8_2_2 = newKafkaVersion(0, 8, 2, 2)
+ V0_9_0_0 = newKafkaVersion(0, 9, 0, 0)
+ V0_9_0_1 = newKafkaVersion(0, 9, 0, 1)
+ V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+ V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+ V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+ V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+ minVersion = V0_8_2_0
+)
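
A short sketch of how the exported pieces of `utils.go` are typically exercised — version gating with `IsAtLeast`, the string/byte encoders, and the panic handler consulted by `withRecover`. Treat it as illustrative rather than part of the vendored code.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Feature gating against broker versions via IsAtLeast.
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_10_0_0
	fmt.Println(cfg.Version.IsAtLeast(sarama.V0_9_0_0))      // true
	fmt.Println(sarama.V0_8_2_0.IsAtLeast(sarama.V0_10_2_0)) // false

	// Strings and byte slices satisfy Encoder, so they can be message keys or values.
	var _ sarama.Encoder = sarama.StringEncoder("hello")
	var _ sarama.Encoder = sarama.ByteEncoder([]byte{0x01, 0x02})

	// Optional: have withRecover report panics from sarama's internal goroutines.
	sarama.PanicHandler = func(v interface{}) { log.Printf("sarama panic: %v", v) }
}
```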
diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock b/vendor/github.com/bsm/sarama-cluster/Gopkg.lock
new file mode 100644
index 00000000..4c018354
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/Gopkg.lock
@@ -0,0 +1,87 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/Shopify/sarama"
+ packages = ["."]
+ revision = "c01858abb625b73a3af51d0798e4ad42c8147093"
+ version = "v1.12.0"
+
+[[projects]]
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "github.com/eapache/go-resiliency"
+ packages = ["breaker"]
+ revision = "6800482f2c813e689c88b7ed3282262385011890"
+ version = "v1.0.0"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/eapache/go-xerial-snappy"
+ packages = ["."]
+ revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
+
+[[projects]]
+ name = "github.com/eapache/queue"
+ packages = ["."]
+ revision = "ded5959c0d4e360646dc9e9908cff48666781367"
+ version = "v1.0.2"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/golang/snappy"
+ packages = ["."]
+ revision = "553a641470496b2327abcac10b36396bd98e45c9"
+
+[[projects]]
+ name = "github.com/onsi/ginkgo"
+ packages = [".","config","extensions/table","internal/codelocation","internal/containernode","internal/failer","internal/leafnodes","internal/remote","internal/spec","internal/spec_iterator","internal/specrunner","internal/suite","internal/testingtproxy","internal/writer","reporters","reporters/stenographer","reporters/stenographer/support/go-colorable","reporters/stenographer/support/go-isatty","types"]
+ revision = "77a8c1e5c40d6bb6c5eb4dd4bdce9763564f6298"
+ version = "v1.3.1"
+
+[[projects]]
+ name = "github.com/onsi/gomega"
+ packages = [".","format","internal/assertion","internal/asyncassertion","internal/oraclematcher","internal/testingtsupport","matchers","matchers/support/goraph/bipartitegraph","matchers/support/goraph/edge","matchers/support/goraph/node","matchers/support/goraph/util","types"]
+ revision = "334b8f472b3af5d541c5642701c1e29e2126f486"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "github.com/pierrec/lz4"
+ packages = ["."]
+ revision = "5c9560bfa9ace2bf86080bf40d46b34ae44604df"
+ version = "v1.0"
+
+[[projects]]
+ name = "github.com/pierrec/xxHash"
+ packages = ["xxHash32"]
+ revision = "f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7"
+ version = "v0.1.1"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/rcrowley/go-metrics"
+ packages = ["."]
+ revision = "1f30fe9094a513ce4c700b9a54458bbb0c96996c"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/sys"
+ packages = ["unix"]
+ revision = "6faef541c73732f438fb660a212750a9ba9f9362"
+
+[[projects]]
+ branch = "v2"
+ name = "gopkg.in/yaml.v2"
+ packages = ["."]
+ revision = "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "462d8b02cbab63faf376d1ecf69285290127285bdea88fcd37e997cebe773b5c"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml b/vendor/github.com/bsm/sarama-cluster/Gopkg.toml
new file mode 100644
index 00000000..4f2ae6fe
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/Gopkg.toml
@@ -0,0 +1,26 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+
+[[constraint]]
+ name = "github.com/Shopify/sarama"
+ version = "1.9.0"
diff --git a/vendor/github.com/bsm/sarama-cluster/LICENSE b/vendor/github.com/bsm/sarama-cluster/LICENSE
new file mode 100644
index 00000000..127751c4
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/LICENSE
@@ -0,0 +1,22 @@
+(The MIT License)
+
+Copyright (c) 2017 Black Square Media Ltd
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+'Software'), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/bsm/sarama-cluster/Makefile b/vendor/github.com/bsm/sarama-cluster/Makefile
new file mode 100644
index 00000000..449de4dd
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/Makefile
@@ -0,0 +1,35 @@
+SCALA_VERSION?= 2.12
+KAFKA_VERSION?= 0.10.2.0
+KAFKA_DIR= kafka_$(SCALA_VERSION)-$(KAFKA_VERSION)
+KAFKA_SRC= http://www.mirrorservice.org/sites/ftp.apache.org/kafka/$(KAFKA_VERSION)/$(KAFKA_DIR).tgz
+KAFKA_ROOT= testdata/$(KAFKA_DIR)
+PKG=$(shell glide nv)
+
+default: vet test
+
+vet:
+ go vet $(PKG)
+
+test: testdeps
+ KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60
+
+test-verbose: testdeps
+ KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v
+
+test-race: testdeps
+ KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v -race
+
+testdeps: $(KAFKA_ROOT)
+
+doc: README.md
+
+.PHONY: test testdeps vet doc
+
+# ---------------------------------------------------------------------
+
+$(KAFKA_ROOT):
+ @mkdir -p $(dir $@)
+ cd $(dir $@) && curl -sSL $(KAFKA_SRC) | tar xz
+
+README.md: README.md.tpl $(wildcard *.go)
+ becca -package $(subst $(GOPATH)/src/,,$(PWD))
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md b/vendor/github.com/bsm/sarama-cluster/README.md
new file mode 100644
index 00000000..42f9030a
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/README.md
@@ -0,0 +1,86 @@
+# Sarama Cluster
+
+[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster)
+[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster)
+[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
+[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+
+Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
+
+## Documentation
+
+Documentation and examples are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "os/signal"
+
+ cluster "github.com/bsm/sarama-cluster"
+)
+
+func main() {
+
+ // init (custom) config, enable errors and notifications
+ config := cluster.NewConfig()
+ config.Consumer.Return.Errors = true
+ config.Group.Return.Notifications = true
+
+ // init consumer
+ brokers := []string{"127.0.0.1:9092"}
+ topics := []string{"my_topic", "other_topic"}
+ consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config)
+ if err != nil {
+ panic(err)
+ }
+ defer consumer.Close()
+
+ // trap SIGINT to trigger a shutdown.
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Interrupt)
+
+ // consume messages, watch errors and notifications
+ for {
+ select {
+ case msg, more := <-consumer.Messages():
+ if more {
+ fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
+ consumer.MarkOffset(msg, "") // mark message as processed
+ }
+ case err, more := <-consumer.Errors():
+ if more {
+ log.Printf("Error: %s\n", err.Error())
+ }
+ case ntf, more := <-consumer.Notifications():
+ if more {
+ log.Printf("Rebalanced: %+v\n", ntf)
+ }
+ case <-signals:
+ return
+ }
+ }
+}
+```
+
+## Running tests
+
+You need to install Ginkgo & Gomega to run tests. Please see
+http://onsi.github.io/ginkgo for more details.
+
+To run tests, call:
+
+ $ make test
+
+## Troubleshooting
+
+### Consumer not receiving any messages?
+
+By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that a brand-new consumer, whose group has never committed any offsets to Kafka, will only receive messages written after it starts and none of the messages already in the topic.
+
+If you want such a consumer to receive every message from the start of the topic, set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`, as sketched below.
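
A minimal sketch of that setting (the broker address, group and topic names are placeholders):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	// Consume from the oldest available offset when the group has no committed offsets yet.
	config := cluster.NewConfig()
	config.Consumer.Offsets.Initial = sarama.OffsetOldest

	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-consumer-group", []string{"my_topic"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()
}
```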
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md.tpl b/vendor/github.com/bsm/sarama-cluster/README.md.tpl
new file mode 100644
index 00000000..3576941e
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/README.md.tpl
@@ -0,0 +1,46 @@
+# Sarama Cluster
+
+[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster)
+[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster)
+[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
+[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+
+Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
+
+## Documentation
+
+Documentation and examples are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "os/signal"
+
+ cluster "github.com/bsm/sarama-cluster"
+)
+
+func main() {{ "ExampleConsumer" | code }}
+```
+
+## Running tests
+
+You need to install Ginkgo & Gomega to run tests. Please see
+http://onsi.github.io/ginkgo for more details.
+
+To run tests, call:
+
+ $ make test
+
+## Troubleshooting
+
+### Consumer not receiving any messages?
+
+By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that a brand-new consumer, whose group has never committed any offsets to Kafka, will only receive messages written after it starts and none of the messages already in the topic.
+
+If you want such a consumer to receive every message from the start of the topic, set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`.
diff --git a/vendor/github.com/bsm/sarama-cluster/balancer.go b/vendor/github.com/bsm/sarama-cluster/balancer.go
new file mode 100644
index 00000000..d66ef71a
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/balancer.go
@@ -0,0 +1,144 @@
+package cluster
+
+import (
+ "math"
+ "sort"
+
+ "github.com/Shopify/sarama"
+)
+
+// Notification events are emitted by the consumers on rebalancing
+type Notification struct {
+ // Claimed contains topic/partitions that were claimed by this rebalance cycle
+ Claimed map[string][]int32
+
+ // Released contains topic/partitions that were released as part of this rebalance cycle
+ Released map[string][]int32
+
+ // Current contains the topic/partitions that are currently claimed by the consumer
+ Current map[string][]int32
+}
+
+func newNotification(released map[string][]int32) *Notification {
+ return &Notification{
+ Claimed: make(map[string][]int32),
+ Released: released,
+ Current: make(map[string][]int32),
+ }
+}
+
+func (n *Notification) claim(current map[string][]int32) {
+ previous := n.Released
+ for topic, partitions := range current {
+ n.Claimed[topic] = int32Slice(partitions).Diff(int32Slice(previous[topic]))
+ }
+ for topic, partitions := range previous {
+ n.Released[topic] = int32Slice(partitions).Diff(int32Slice(current[topic]))
+ }
+ n.Current = current
+}
+
+// --------------------------------------------------------------------
+
+type topicInfo struct {
+ Partitions []int32
+ MemberIDs []string
+}
+
+func (info topicInfo) Perform(s Strategy) map[string][]int32 {
+ if s == StrategyRoundRobin {
+ return info.RoundRobin()
+ }
+ return info.Ranges()
+}
+
+func (info topicInfo) Ranges() map[string][]int32 {
+ sort.Strings(info.MemberIDs)
+
+ mlen := len(info.MemberIDs)
+ plen := len(info.Partitions)
+ res := make(map[string][]int32, mlen)
+
+ for pos, memberID := range info.MemberIDs {
+ n, i := float64(plen)/float64(mlen), float64(pos)
+ min := int(math.Floor(i*n + 0.5))
+ max := int(math.Floor((i+1)*n + 0.5))
+ sub := info.Partitions[min:max]
+ if len(sub) > 0 {
+ res[memberID] = sub
+ }
+ }
+ return res
+}
+
+func (info topicInfo) RoundRobin() map[string][]int32 {
+ sort.Strings(info.MemberIDs)
+
+ mlen := len(info.MemberIDs)
+ res := make(map[string][]int32, mlen)
+ for i, pnum := range info.Partitions {
+ memberID := info.MemberIDs[i%mlen]
+ res[memberID] = append(res[memberID], pnum)
+ }
+ return res
+}
+
+// --------------------------------------------------------------------
+
+type balancer struct {
+ client sarama.Client
+ topics map[string]topicInfo
+}
+
+func newBalancerFromMeta(client sarama.Client, members map[string]sarama.ConsumerGroupMemberMetadata) (*balancer, error) {
+ balancer := newBalancer(client)
+ for memberID, meta := range members {
+ for _, topic := range meta.Topics {
+ if err := balancer.Topic(topic, memberID); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return balancer, nil
+}
+
+func newBalancer(client sarama.Client) *balancer {
+ return &balancer{
+ client: client,
+ topics: make(map[string]topicInfo),
+ }
+}
+
+func (r *balancer) Topic(name string, memberID string) error {
+ topic, ok := r.topics[name]
+ if !ok {
+ nums, err := r.client.Partitions(name)
+ if err != nil {
+ return err
+ }
+ topic = topicInfo{
+ Partitions: nums,
+ MemberIDs: make([]string, 0, 1),
+ }
+ }
+ topic.MemberIDs = append(topic.MemberIDs, memberID)
+ r.topics[name] = topic
+ return nil
+}
+
+func (r *balancer) Perform(s Strategy) map[string]map[string][]int32 {
+ if r == nil {
+ return nil
+ }
+
+ res := make(map[string]map[string][]int32, 1)
+ for topic, info := range r.topics {
+ for memberID, partitions := range info.Perform(s) {
+ if _, ok := res[memberID]; !ok {
+ res[memberID] = make(map[string][]int32, 1)
+ }
+ res[memberID][topic] = partitions
+ }
+ }
+ return res
+}
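
To make the assignment arithmetic above concrete, here is a standalone sketch that mirrors the unexported `Ranges` and `RoundRobin` logic; the member IDs and partition numbers are made up, and this is an illustration rather than the vendored code.

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// ranges mirrors topicInfo.Ranges: each member gets a contiguous slice of partitions.
func ranges(members []string, partitions []int32) map[string][]int32 {
	sort.Strings(members)
	mlen, plen := len(members), len(partitions)
	res := make(map[string][]int32, mlen)
	for pos, id := range members {
		n, i := float64(plen)/float64(mlen), float64(pos)
		min := int(math.Floor(i*n + 0.5))
		max := int(math.Floor((i+1)*n + 0.5))
		if sub := partitions[min:max]; len(sub) > 0 {
			res[id] = sub
		}
	}
	return res
}

// roundRobin mirrors topicInfo.RoundRobin: partitions alternate between members.
func roundRobin(members []string, partitions []int32) map[string][]int32 {
	sort.Strings(members)
	res := make(map[string][]int32, len(members))
	for i, p := range partitions {
		id := members[i%len(members)]
		res[id] = append(res[id], p)
	}
	return res
}

func main() {
	members := []string{"consumer-1", "consumer-2"}
	partitions := []int32{0, 1, 2, 3, 4, 5}
	fmt.Println(ranges(members, partitions))     // map[consumer-1:[0 1 2] consumer-2:[3 4 5]]
	fmt.Println(roundRobin(members, partitions)) // map[consumer-1:[0 2 4] consumer-2:[1 3 5]]
}
```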
diff --git a/vendor/github.com/bsm/sarama-cluster/client.go b/vendor/github.com/bsm/sarama-cluster/client.go
new file mode 100644
index 00000000..2cfac5d6
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/client.go
@@ -0,0 +1,28 @@
+package cluster
+
+import "github.com/Shopify/sarama"
+
+// Client is a group client
+type Client struct {
+ sarama.Client
+ config Config
+ own bool
+}
+
+// NewClient creates a new client instance
+func NewClient(addrs []string, config *Config) (*Client, error) {
+ if config == nil {
+ config = NewConfig()
+ }
+
+ if err := config.Validate(); err != nil {
+ return nil, err
+ }
+
+ client, err := sarama.NewClient(addrs, &config.Config)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Client{Client: client, config: *config}, nil
+}
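
A short sketch of sharing one `Client` with a consumer built from it; the broker address, group and topic names are placeholders. When the client is created by the caller (rather than by `NewConsumer`), the caller remains responsible for closing it — see `NewConsumerFromClient` in consumer.go below.

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	// nil config -> NewConfig() defaults.
	client, err := cluster.NewClient([]string{"127.0.0.1:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // caller-owned client, so the caller closes it

	consumer, err := cluster.NewConsumerFromClient(client, "my-consumer-group", []string{"my_topic"})
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()
}
```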
diff --git a/vendor/github.com/bsm/sarama-cluster/cluster.go b/vendor/github.com/bsm/sarama-cluster/cluster.go
new file mode 100644
index 00000000..760d0c73
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/cluster.go
@@ -0,0 +1,73 @@
+package cluster
+
+import (
+ "fmt"
+ "sort"
+)
+
+// Strategy for partition to consumer assignment
+type Strategy string
+
+const (
+ // StrategyRange is the default and assigns partition ranges to consumers.
+ // Example with six partitions and two consumers:
+ // C1: [0, 1, 2]
+ // C2: [3, 4, 5]
+ StrategyRange Strategy = "range"
+
+ // StrategyRoundRobin assigns partitions by alternating over consumers.
+ // Example with six partitions and two consumers:
+ // C1: [0, 2, 4]
+ // C2: [1, 3, 5]
+ StrategyRoundRobin Strategy = "roundrobin"
+)
+
+// Error instances are wrappers for internal errors with a context and
+// may be returned through the consumer's Errors() channel
+type Error struct {
+ Ctx string
+ error
+}
+
+// --------------------------------------------------------------------
+
+type none struct{}
+
+type topicPartition struct {
+ Topic string
+ Partition int32
+}
+
+func (tp *topicPartition) String() string {
+ return fmt.Sprintf("%s-%d", tp.Topic, tp.Partition)
+}
+
+type offsetInfo struct {
+ Offset int64
+ Metadata string
+}
+
+func (i offsetInfo) NextOffset(fallback int64) int64 {
+ if i.Offset > -1 {
+ return i.Offset
+ }
+ return fallback
+}
+
+type int32Slice []int32
+
+func (p int32Slice) Len() int { return len(p) }
+func (p int32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p int32Slice) Diff(o int32Slice) (res []int32) {
+ on := len(o)
+ for _, x := range p {
+ n := sort.Search(on, func(i int) bool { return o[i] >= x })
+ if n < on && o[n] == x {
+ continue
+ }
+ res = append(res, x)
+ }
+ return
+}
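
The `Error` wrapper above carries a `Ctx` string naming the failing stage. A hypothetical helper for messages read off the consumer's `Errors()` channel might unwrap it like this; partition-level errors arrive unwrapped, hence the fallback.

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

// logConsumerError is a hypothetical helper: group-management failures are
// delivered as *cluster.Error (Ctx is e.g. "heartbeat", "commit", "rebalance"),
// while plain partition consumer errors pass through unwrapped.
func logConsumerError(err error) {
	if cerr, ok := err.(*cluster.Error); ok {
		log.Printf("consumer %s error: %v", cerr.Ctx, cerr)
		return
	}
	log.Printf("consumer error: %v", err)
}

func main() {
	_ = logConsumerError // referenced here only so the sketch compiles standalone
}
```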
diff --git a/vendor/github.com/bsm/sarama-cluster/config.go b/vendor/github.com/bsm/sarama-cluster/config.go
new file mode 100644
index 00000000..2588794e
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/config.go
@@ -0,0 +1,122 @@
+package cluster
+
+import (
+ "regexp"
+ "time"
+
+ "github.com/Shopify/sarama"
+)
+
+var minVersion = sarama.V0_9_0_0
+
+// Config extends sarama.Config with Group specific namespace
+type Config struct {
+ sarama.Config
+
+ // Group is the namespace for group management properties
+ Group struct {
+ // The strategy to use for the allocation of partitions to consumers (defaults to StrategyRange)
+ PartitionStrategy Strategy
+ Offsets struct {
+ Retry struct {
+ // The number of retries when committing offsets (defaults to 3).
+ Max int
+ }
+ Synchronization struct {
+ // The duration allowed for other clients to commit their offsets before resumption in this client, e.g. during a rebalance
+ // NewConfig sets this to the Consumer.MaxProcessingTime duration of the Sarama configuration
+ DwellTime time.Duration
+ }
+ }
+ Session struct {
+ // The allowed session timeout for registered consumers (defaults to 30s).
+ // Must be within the allowed server range.
+ Timeout time.Duration
+ }
+ Heartbeat struct {
+ // Interval between each heartbeat (defaults to 3s). It should be no more
+ // than 1/3rd of the Group.Session.Timeout setting
+ Interval time.Duration
+ }
+ // Return specifies which group channels will be populated. If they are set to true,
+ // you must read from the respective channels to prevent deadlock.
+ Return struct {
+ // If enabled, rebalance notifications will be returned on the
+ // Notifications channel (default disabled).
+ Notifications bool
+ }
+
+ Topics struct {
+ // An additional whitelist of topics to subscribe to.
+ Whitelist *regexp.Regexp
+ // An additional blacklist of topics to avoid. If set, this takes precedence
+ // over the Whitelist setting.
+ Blacklist *regexp.Regexp
+ }
+
+ Member struct {
+ // Custom metadata to include when joining the group. The user data for all joined members
+ // can be retrieved by sending a DescribeGroupRequest to the broker that is the
+ // coordinator for the group.
+ UserData []byte
+ }
+ }
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+ c := &Config{
+ Config: *sarama.NewConfig(),
+ }
+ c.Group.PartitionStrategy = StrategyRange
+ c.Group.Offsets.Retry.Max = 3
+ c.Group.Offsets.Synchronization.DwellTime = c.Consumer.MaxProcessingTime
+ c.Group.Session.Timeout = 30 * time.Second
+ c.Group.Heartbeat.Interval = 3 * time.Second
+ c.Config.Version = minVersion
+ return c
+}
+
+// Validate checks a Config instance. It will return a
+// sarama.ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+ if c.Group.Heartbeat.Interval%time.Millisecond != 0 {
+ sarama.Logger.Println("Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Group.Session.Timeout%time.Millisecond != 0 {
+ sarama.Logger.Println("Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Group.PartitionStrategy != StrategyRange && c.Group.PartitionStrategy != StrategyRoundRobin {
+ sarama.Logger.Println("Group.PartitionStrategy is not supported; range will be assumed.")
+ }
+ if !c.Version.IsAtLeast(minVersion) {
+ sarama.Logger.Println("Version is not supported; 0.9. will be assumed.")
+ c.Version = minVersion
+ }
+ if err := c.Config.Validate(); err != nil {
+ return err
+ }
+
+ // validate the Group values
+ switch {
+ case c.Group.Offsets.Retry.Max < 0:
+ return sarama.ConfigurationError("Group.Offsets.Retry.Max must be >= 0")
+ case c.Group.Offsets.Synchronization.DwellTime <= 0:
+ return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be > 0")
+ case c.Group.Offsets.Synchronization.DwellTime > 10*time.Minute:
+ return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be <= 10m")
+ case c.Group.Heartbeat.Interval <= 0:
+ return sarama.ConfigurationError("Group.Heartbeat.Interval must be > 0")
+ case c.Group.Session.Timeout <= 0:
+ return sarama.ConfigurationError("Group.Session.Timeout must be > 0")
+ }
+
+ // ensure offset is correct
+ switch c.Consumer.Offsets.Initial {
+ case sarama.OffsetOldest, sarama.OffsetNewest:
+ default:
+ return sarama.ConfigurationError("Consumer.Offsets.Initial must be either OffsetOldest or OffsetNewest")
+ }
+
+ return nil
+}
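
A hedged sketch of tuning the `Group` namespace above and validating the result; all of the values below are illustrative, not recommendations.

```go
package main

import (
	"log"
	"time"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	config := cluster.NewConfig()
	config.Group.PartitionStrategy = cluster.StrategyRoundRobin
	config.Group.Session.Timeout = 30 * time.Second
	config.Group.Heartbeat.Interval = 10 * time.Second // no more than 1/3 of the session timeout
	config.Group.Offsets.Retry.Max = 5
	config.Group.Return.Notifications = true

	if err := config.Validate(); err != nil {
		log.Fatal(err)
	}
}
```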
diff --git a/vendor/github.com/bsm/sarama-cluster/consumer.go b/vendor/github.com/bsm/sarama-cluster/consumer.go
new file mode 100644
index 00000000..619e0b5d
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/consumer.go
@@ -0,0 +1,799 @@
+package cluster
+
+import (
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/Shopify/sarama"
+)
+
+// Consumer is a cluster group consumer
+type Consumer struct {
+ client *Client
+
+ csmr sarama.Consumer
+ subs *partitionMap
+
+ consumerID string
+ generationID int32
+ groupID string
+ memberID string
+
+ coreTopics []string
+ extraTopics []string
+
+ dying, dead chan none
+
+ consuming int32
+ errors chan error
+ messages chan *sarama.ConsumerMessage
+ notifications chan *Notification
+
+ commitMu sync.Mutex
+}
+
+// NewConsumerFromClient initializes a new consumer from an existing client
+func NewConsumerFromClient(client *Client, groupID string, topics []string) (*Consumer, error) {
+ csmr, err := sarama.NewConsumerFromClient(client.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ sort.Strings(topics)
+ c := &Consumer{
+ client: client,
+ csmr: csmr,
+ subs: newPartitionMap(),
+ groupID: groupID,
+
+ coreTopics: topics,
+
+ dying: make(chan none),
+ dead: make(chan none),
+
+ errors: make(chan error, client.config.ChannelBufferSize),
+ messages: make(chan *sarama.ConsumerMessage),
+ notifications: make(chan *Notification, 1),
+ }
+ if err := c.client.RefreshCoordinator(groupID); err != nil {
+ return nil, err
+ }
+
+ go c.mainLoop()
+ return c, nil
+}
+
+// NewConsumer initializes a new consumer
+func NewConsumer(addrs []string, groupID string, topics []string, config *Config) (*Consumer, error) {
+ client, err := NewClient(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+
+ consumer, err := NewConsumerFromClient(client, groupID, topics)
+ if err != nil {
+ _ = client.Close()
+ return nil, err
+ }
+ consumer.client.own = true
+ return consumer, nil
+}
+
+// Messages returns the read channel for the messages that are returned by
+// the broker.
+func (c *Consumer) Messages() <-chan *sarama.ConsumerMessage { return c.messages }
+
+// Errors returns a read channel of errors that occur during offset management, if
+// enabled. By default, errors are logged and not returned over this channel. If
+// you want to implement any custom error handling, set your config's
+// Consumer.Return.Errors setting to true, and read from this channel.
+func (c *Consumer) Errors() <-chan error { return c.errors }
+
+// Notifications returns a channel of Notifications that occur during consumer
+// rebalancing. Notifications will only be emitted over this channel, if your config's
+// Group.Return.Notifications setting to true.
+func (c *Consumer) Notifications() <-chan *Notification { return c.notifications }
+
+// HighWaterMarks returns the current high water marks for each topic and partition.
+// Consistency between partitions is not guaranteed since high water marks are updated separately.
+func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { return c.csmr.HighWaterMarks() }
+
+// MarkOffset marks the provided message as processed, alongside a metadata string
+// that represents the state of the partition consumer at that point in time. The
+// metadata string can be used by another consumer to restore that state, so it
+// can resume consumption.
+//
+// Note: calling MarkOffset does not necessarily commit the offset to the backend
+// store immediately for efficiency reasons, and it may never be committed if
+// your application crashes. This means that you may end up processing the same
+// message twice, and your processing should ideally be idempotent.
+func (c *Consumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
+ c.subs.Fetch(msg.Topic, msg.Partition).MarkOffset(msg.Offset+1, metadata)
+}
+
+// MarkPartitionOffset marks an offset of the provided topic/partition as processed.
+// See MarkOffset for additional explanation.
+func (c *Consumer) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
+ c.subs.Fetch(topic, partition).MarkOffset(offset+1, metadata)
+}
+
+// MarkOffsets marks stashed offsets as processed.
+// See MarkOffset for additional explanation.
+func (c *Consumer) MarkOffsets(s *OffsetStash) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ for tp, info := range s.offsets {
+ c.subs.Fetch(tp.Topic, tp.Partition).MarkOffset(info.Offset+1, info.Metadata)
+ delete(s.offsets, tp)
+ }
+}
+
+// Subscriptions returns the consumed topics and partitions
+func (c *Consumer) Subscriptions() map[string][]int32 {
+ return c.subs.Info()
+}
+
+// CommitOffsets manually commits marked offsets.
+func (c *Consumer) CommitOffsets() error {
+ c.commitMu.Lock()
+ defer c.commitMu.Unlock()
+
+ req := &sarama.OffsetCommitRequest{
+ Version: 2,
+ ConsumerGroup: c.groupID,
+ ConsumerGroupGeneration: c.generationID,
+ ConsumerID: c.memberID,
+ RetentionTime: -1,
+ }
+
+ if ns := c.client.config.Consumer.Offsets.Retention; ns != 0 {
+ req.RetentionTime = int64(ns / time.Millisecond)
+ }
+
+ snap := c.subs.Snapshot()
+ dirty := false
+ for tp, state := range snap {
+ if state.Dirty {
+ dirty = true
+ req.AddBlock(tp.Topic, tp.Partition, state.Info.Offset, 0, state.Info.Metadata)
+ }
+ }
+ if !dirty {
+ return nil
+ }
+
+ broker, err := c.client.Coordinator(c.groupID)
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return err
+ }
+
+ resp, err := broker.CommitOffset(req)
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return err
+ }
+
+ for topic, errs := range resp.Errors {
+ for partition, kerr := range errs {
+ if kerr != sarama.ErrNoError {
+ err = kerr
+ } else if state, ok := snap[topicPartition{topic, partition}]; ok {
+ c.subs.Fetch(topic, partition).MarkCommitted(state.Info.Offset)
+ }
+ }
+ }
+ return err
+}
+
+// Close safely closes the consumer and releases all resources
+func (c *Consumer) Close() (err error) {
+ select {
+ case <-c.dying:
+ return
+ default:
+ close(c.dying)
+ }
+ <-c.dead
+
+ if e := c.release(); e != nil {
+ err = e
+ }
+ if e := c.csmr.Close(); e != nil {
+ err = e
+ }
+ close(c.messages)
+ close(c.errors)
+
+ if e := c.leaveGroup(); e != nil {
+ err = e
+ }
+ close(c.notifications)
+
+ if c.client.own {
+ if e := c.client.Close(); e != nil {
+ err = e
+ }
+ }
+
+ return
+}
+
+func (c *Consumer) mainLoop() {
+ defer close(c.dead)
+ defer atomic.StoreInt32(&c.consuming, 0)
+
+ for {
+ atomic.StoreInt32(&c.consuming, 0)
+
+ // Check if close was requested
+ select {
+ case <-c.dying:
+ return
+ default:
+ }
+
+ // Remember previous subscriptions
+ var notification *Notification
+ if c.client.config.Group.Return.Notifications {
+ notification = newNotification(c.subs.Info())
+ }
+
+ // Rebalance, fetch new subscriptions
+ subs, err := c.rebalance()
+ if err != nil {
+ c.rebalanceError(err, notification)
+ continue
+ }
+
+ // Start the heartbeat
+ hbStop, hbDone := make(chan none), make(chan none)
+ go c.hbLoop(hbStop, hbDone)
+
+ // Subscribe to topic/partitions
+ if err := c.subscribe(subs); err != nil {
+ close(hbStop)
+ <-hbDone
+ c.rebalanceError(err, notification)
+ continue
+ }
+
+ // Start topic watcher loop
+ twStop, twDone := make(chan none), make(chan none)
+ go c.twLoop(twStop, twDone)
+
+ // Start consuming and committing offsets
+ cmStop, cmDone := make(chan none), make(chan none)
+ go c.cmLoop(cmStop, cmDone)
+ atomic.StoreInt32(&c.consuming, 1)
+
+ // Update notification with new claims
+ if c.client.config.Group.Return.Notifications {
+ notification.claim(subs)
+ c.notifications <- notification
+ }
+
+ // Wait for signals
+ select {
+ case <-hbDone:
+ close(cmStop)
+ close(twStop)
+ <-cmDone
+ <-twDone
+ case <-twDone:
+ close(cmStop)
+ close(hbStop)
+ <-cmDone
+ <-hbDone
+ case <-cmDone:
+ close(twStop)
+ close(hbStop)
+ <-twDone
+ <-hbDone
+ case <-c.dying:
+ close(cmStop)
+ close(twStop)
+ close(hbStop)
+ <-cmDone
+ <-twDone
+ <-hbDone
+ return
+ }
+ }
+}
+
+// heartbeat loop, triggered by the mainLoop
+func (c *Consumer) hbLoop(stop <-chan none, done chan<- none) {
+ defer close(done)
+
+ ticker := time.NewTicker(c.client.config.Group.Heartbeat.Interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ switch err := c.heartbeat(); err {
+ case nil, sarama.ErrNoError:
+ case sarama.ErrNotCoordinatorForConsumer, sarama.ErrRebalanceInProgress:
+ return
+ default:
+ c.handleError(&Error{Ctx: "heartbeat", error: err})
+ return
+ }
+ case <-stop:
+ return
+ }
+ }
+}
+
+// topic watcher loop, triggered by the mainLoop
+func (c *Consumer) twLoop(stop <-chan none, done chan<- none) {
+ defer close(done)
+
+ ticker := time.NewTicker(c.client.config.Metadata.RefreshFrequency / 2)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ topics, err := c.client.Topics()
+ if err != nil {
+ c.handleError(&Error{Ctx: "topics", error: err})
+ return
+ }
+
+ for _, topic := range topics {
+ if !c.isKnownCoreTopic(topic) &&
+ !c.isKnownExtraTopic(topic) &&
+ c.isPotentialExtraTopic(topic) {
+ return
+ }
+ }
+ case <-stop:
+ return
+ }
+ }
+}
+
+// commit loop, triggered by the mainLoop
+func (c *Consumer) cmLoop(stop <-chan none, done chan<- none) {
+ defer close(done)
+
+ ticker := time.NewTicker(c.client.config.Consumer.Offsets.CommitInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); err != nil {
+ c.handleError(&Error{Ctx: "commit", error: err})
+ return
+ }
+ case <-stop:
+ return
+ }
+ }
+}
+
+func (c *Consumer) rebalanceError(err error, notification *Notification) {
+ if c.client.config.Group.Return.Notifications {
+ c.notifications <- notification
+ }
+
+ switch err {
+ case sarama.ErrRebalanceInProgress:
+ default:
+ c.handleError(&Error{Ctx: "rebalance", error: err})
+ }
+
+ select {
+ case <-c.dying:
+ case <-time.After(c.client.config.Metadata.Retry.Backoff):
+ }
+}
+
+func (c *Consumer) handleError(e *Error) {
+ if c.client.config.Consumer.Return.Errors {
+ select {
+ case c.errors <- e:
+ case <-c.dying:
+ return
+ }
+ } else {
+ sarama.Logger.Printf("%s error: %s\n", e.Ctx, e.Error())
+ }
+}
+
+// Releases the consumer and commits offsets, called from rebalance() and Close()
+func (c *Consumer) release() (err error) {
+ // Stop all consumers
+ c.subs.Stop()
+
+ // Clear subscriptions on exit
+ defer c.subs.Clear()
+
+ // Wait for messages to be processed
+ select {
+ case <-c.dying:
+ case <-time.After(c.client.config.Group.Offsets.Synchronization.DwellTime):
+ }
+
+ // Commit offsets, continue on errors
+ if e := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); e != nil {
+ err = e
+ }
+
+ return
+}
+
+// --------------------------------------------------------------------
+
+// Performs a heartbeat, part of the mainLoop()
+func (c *Consumer) heartbeat() error {
+ broker, err := c.client.Coordinator(c.groupID)
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return err
+ }
+
+ resp, err := broker.Heartbeat(&sarama.HeartbeatRequest{
+ GroupId: c.groupID,
+ MemberId: c.memberID,
+ GenerationId: c.generationID,
+ })
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return err
+ }
+ return resp.Err
+}
+
+// Performs a rebalance, part of the mainLoop()
+func (c *Consumer) rebalance() (map[string][]int32, error) {
+ sarama.Logger.Printf("cluster/consumer %s rebalance\n", c.memberID)
+
+ if err := c.refreshMetadata(); err != nil {
+ return nil, err
+ }
+
+ if err := c.client.RefreshCoordinator(c.groupID); err != nil {
+ return nil, err
+ }
+
+ allTopics, err := c.client.Topics()
+ if err != nil {
+ return nil, err
+ }
+ c.extraTopics = c.selectExtraTopics(allTopics)
+ sort.Strings(c.extraTopics)
+
+ // Release subscriptions
+ if err := c.release(); err != nil {
+ return nil, err
+ }
+
+ // Re-join consumer group
+ strategy, err := c.joinGroup()
+ switch {
+ case err == sarama.ErrUnknownMemberId:
+ c.memberID = ""
+ return nil, err
+ case err != nil:
+ return nil, err
+ }
+ // sarama.Logger.Printf("cluster/consumer %s/%d joined group %s\n", c.memberID, c.generationID, c.groupID)
+
+ // Sync consumer group state, fetch subscriptions
+ subs, err := c.syncGroup(strategy)
+ switch {
+ case err == sarama.ErrRebalanceInProgress:
+ return nil, err
+ case err != nil:
+ _ = c.leaveGroup()
+ return nil, err
+ }
+ return subs, nil
+}
+
+// Performs the subscription, part of the mainLoop()
+func (c *Consumer) subscribe(subs map[string][]int32) error {
+ // fetch offsets
+ offsets, err := c.fetchOffsets(subs)
+ if err != nil {
+ _ = c.leaveGroup()
+ return err
+ }
+
+ // create consumers in parallel
+ var mu sync.Mutex
+ var wg sync.WaitGroup
+
+ for topic, partitions := range subs {
+ for _, partition := range partitions {
+ wg.Add(1)
+
+ info := offsets[topic][partition]
+ go func(t string, p int32) {
+ if e := c.createConsumer(t, p, info); e != nil {
+ mu.Lock()
+ err = e
+ mu.Unlock()
+ }
+ wg.Done()
+ }(topic, partition)
+ }
+ }
+ wg.Wait()
+
+ if err != nil {
+ _ = c.release()
+ _ = c.leaveGroup()
+ }
+ return err
+}
+
+// --------------------------------------------------------------------
+
+// Send a request to the broker to join the group on rebalance()
+func (c *Consumer) joinGroup() (*balancer, error) {
+ req := &sarama.JoinGroupRequest{
+ GroupId: c.groupID,
+ MemberId: c.memberID,
+ SessionTimeout: int32(c.client.config.Group.Session.Timeout / time.Millisecond),
+ ProtocolType: "consumer",
+ }
+
+ meta := &sarama.ConsumerGroupMemberMetadata{
+ Version: 1,
+ Topics: append(c.coreTopics, c.extraTopics...),
+ UserData: c.client.config.Group.Member.UserData,
+ }
+ err := req.AddGroupProtocolMetadata(string(StrategyRange), meta)
+ if err != nil {
+ return nil, err
+ }
+ err = req.AddGroupProtocolMetadata(string(StrategyRoundRobin), meta)
+ if err != nil {
+ return nil, err
+ }
+
+ broker, err := c.client.Coordinator(c.groupID)
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return nil, err
+ }
+
+ resp, err := broker.JoinGroup(req)
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return nil, err
+ } else if resp.Err != sarama.ErrNoError {
+ c.closeCoordinator(broker, resp.Err)
+ return nil, resp.Err
+ }
+
+ var strategy *balancer
+ if resp.LeaderId == resp.MemberId {
+ members, err := resp.GetMembers()
+ if err != nil {
+ return nil, err
+ }
+
+ strategy, err = newBalancerFromMeta(c.client, members)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ c.memberID = resp.MemberId
+ c.generationID = resp.GenerationId
+
+ return strategy, nil
+}
+
+// Send a request to the broker to sync the group on rebalance().
+// Returns a list of topics and partitions to consume.
+func (c *Consumer) syncGroup(strategy *balancer) (map[string][]int32, error) {
+ req := &sarama.SyncGroupRequest{
+ GroupId: c.groupID,
+ MemberId: c.memberID,
+ GenerationId: c.generationID,
+ }
+
+ for memberID, topics := range strategy.Perform(c.client.config.Group.PartitionStrategy) {
+ if err := req.AddGroupAssignmentMember(memberID, &sarama.ConsumerGroupMemberAssignment{
+ Version: 1,
+ Topics: topics,
+ }); err != nil {
+ return nil, err
+ }
+ }
+
+ broker, err := c.client.Coordinator(c.groupID)
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return nil, err
+ }
+
+ resp, err := broker.SyncGroup(req)
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return nil, err
+ } else if resp.Err != sarama.ErrNoError {
+ c.closeCoordinator(broker, resp.Err)
+ return nil, resp.Err
+ }
+
+ // Return if there is nothing to subscribe to
+ if len(resp.MemberAssignment) == 0 {
+ return nil, nil
+ }
+
+ // Get assigned subscriptions
+ members, err := resp.GetMemberAssignment()
+ if err != nil {
+ return nil, err
+ }
+
+ // Sort partitions, for each topic
+ for topic := range members.Topics {
+ sort.Sort(int32Slice(members.Topics[topic]))
+ }
+ return members.Topics, nil
+}
+
+// Fetches latest committed offsets for all subscriptions
+func (c *Consumer) fetchOffsets(subs map[string][]int32) (map[string]map[int32]offsetInfo, error) {
+ offsets := make(map[string]map[int32]offsetInfo, len(subs))
+ req := &sarama.OffsetFetchRequest{
+ Version: 1,
+ ConsumerGroup: c.groupID,
+ }
+
+ for topic, partitions := range subs {
+ offsets[topic] = make(map[int32]offsetInfo, len(partitions))
+ for _, partition := range partitions {
+ offsets[topic][partition] = offsetInfo{Offset: -1}
+ req.AddPartition(topic, partition)
+ }
+ }
+
+ // Wait for other cluster consumers to process, release and commit
+ // Times-two so that we are less likely to "win" a race against a sarama-cluster client that is:
+ // 1) losing a topic-partition in a rebalance, and therefore:
+ // 2) sleeping to allow some processing to complete, before:
+ // 3) committing the offsets.
+ // Note that this doesn't necessarily account for the end-to-end latency of the Kafka offsets topic.
+ select {
+ case <-c.dying:
+ return nil, sarama.ErrClosedClient
+ case <-time.After(c.client.config.Group.Offsets.Synchronization.DwellTime * 2):
+ }
+
+ broker, err := c.client.Coordinator(c.groupID)
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return nil, err
+ }
+
+ resp, err := broker.FetchOffset(req)
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return nil, err
+ }
+
+ for topic, partitions := range subs {
+ for _, partition := range partitions {
+ block := resp.GetBlock(topic, partition)
+ if block == nil {
+ return nil, sarama.ErrIncompleteResponse
+ }
+
+ if block.Err == sarama.ErrNoError {
+ offsets[topic][partition] = offsetInfo{Offset: block.Offset, Metadata: block.Metadata}
+ } else {
+ return nil, block.Err
+ }
+ }
+ }
+ return offsets, nil
+}
+
+// Send a request to the broker to leave the group on failed rebalance() and on Close()
+func (c *Consumer) leaveGroup() error {
+ broker, err := c.client.Coordinator(c.groupID)
+ if err != nil {
+ c.closeCoordinator(broker, err)
+ return err
+ }
+
+ if _, err = broker.LeaveGroup(&sarama.LeaveGroupRequest{
+ GroupId: c.groupID,
+ MemberId: c.memberID,
+ }); err != nil {
+ c.closeCoordinator(broker, err)
+ }
+ return err
+}
+
+// --------------------------------------------------------------------
+
+func (c *Consumer) createConsumer(topic string, partition int32, info offsetInfo) error {
+ sarama.Logger.Printf("cluster/consumer %s consume %s/%d from %d\n", c.memberID, topic, partition, info.NextOffset(c.client.config.Consumer.Offsets.Initial))
+
+ // Create partitionConsumer
+ pc, err := newPartitionConsumer(c.csmr, topic, partition, info, c.client.config.Consumer.Offsets.Initial)
+ if err != nil {
+ return err
+ }
+
+ // Store in subscriptions
+ c.subs.Store(topic, partition, pc)
+
+ // Start partition consumer goroutine
+ go pc.Loop(c.messages, c.errors)
+
+ return nil
+}
+
+func (c *Consumer) commitOffsetsWithRetry(retries int) error {
+ err := c.CommitOffsets()
+ if err != nil && retries > 0 {
+ return c.commitOffsetsWithRetry(retries - 1)
+ }
+ return err
+}
+
+func (c *Consumer) closeCoordinator(broker *sarama.Broker, err error) {
+ if broker != nil {
+ _ = broker.Close()
+ }
+
+ switch err {
+ case sarama.ErrConsumerCoordinatorNotAvailable, sarama.ErrNotCoordinatorForConsumer:
+ _ = c.client.RefreshCoordinator(c.groupID)
+ }
+}
+
+func (c *Consumer) selectExtraTopics(allTopics []string) []string {
+ extra := allTopics[:0]
+ for _, topic := range allTopics {
+ if !c.isKnownCoreTopic(topic) && c.isPotentialExtraTopic(topic) {
+ extra = append(extra, topic)
+ }
+ }
+ return extra
+}
+
+func (c *Consumer) isKnownCoreTopic(topic string) bool {
+ pos := sort.SearchStrings(c.coreTopics, topic)
+ return pos < len(c.coreTopics) && c.coreTopics[pos] == topic
+}
+
+func (c *Consumer) isKnownExtraTopic(topic string) bool {
+ pos := sort.SearchStrings(c.extraTopics, topic)
+ return pos < len(c.extraTopics) && c.extraTopics[pos] == topic
+}
+
+func (c *Consumer) isPotentialExtraTopic(topic string) bool {
+ rx := c.client.config.Group.Topics
+ if rx.Blacklist != nil && rx.Blacklist.MatchString(topic) {
+ return false
+ }
+ if rx.Whitelist != nil && rx.Whitelist.MatchString(topic) {
+ return true
+ }
+ return false
+}
+
+func (c *Consumer) refreshMetadata() error {
+ err := c.client.RefreshMetadata()
+ if err == sarama.ErrTopicAuthorizationFailed {
+ // maybe we didn't have authorization to describe all topics
+ err = c.client.RefreshMetadata(c.coreTopics...)
+ }
+ return err
+}
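
A consumption sketch tying together `Messages`, `MarkOffset` and the manual `CommitOffsets` path above; the broker, group and topic names are placeholders and the batch size of 100 is arbitrary.

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-consumer-group", []string{"my_topic"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	count := 0
	for msg := range consumer.Messages() {
		consumer.MarkOffset(msg, "") // stages the offset; it is not committed yet
		count++
		if count%100 == 0 {
			// Flush staged offsets now instead of waiting for the periodic
			// commit loop driven by Consumer.Offsets.CommitInterval.
			if err := consumer.CommitOffsets(); err != nil {
				log.Println("commit failed:", err)
			}
		}
	}
}
```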
diff --git a/vendor/github.com/bsm/sarama-cluster/doc.go b/vendor/github.com/bsm/sarama-cluster/doc.go
new file mode 100644
index 00000000..9c8ff16a
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/doc.go
@@ -0,0 +1,8 @@
+/*
+Package cluster provides cluster extensions for Sarama, enabling users
+to consume topics across multiple, balanced nodes.
+
+It requires Kafka v0.9+ and follows the steps described in:
+https://cwiki.apache.org/confluence/display/KAFKA/Kafka+0.9+Consumer+Rewrite+Design
+*/
+package cluster
diff --git a/vendor/github.com/bsm/sarama-cluster/offsets.go b/vendor/github.com/bsm/sarama-cluster/offsets.go
new file mode 100644
index 00000000..b2abe355
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/offsets.go
@@ -0,0 +1,49 @@
+package cluster
+
+import (
+ "sync"
+
+ "github.com/Shopify/sarama"
+)
+
+// OffsetStash allows you to accumulate offsets and
+// mark them as processed in bulk
+type OffsetStash struct {
+ offsets map[topicPartition]offsetInfo
+ mu sync.Mutex
+}
+
+// NewOffsetStash inits a blank stash
+func NewOffsetStash() *OffsetStash {
+ return &OffsetStash{offsets: make(map[topicPartition]offsetInfo)}
+}
+
+// MarkOffset stashes the provided message offset
+func (s *OffsetStash) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
+ s.MarkPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
+}
+
+// MarkPartitionOffset stashes the offset for the provided topic/partition combination
+func (s *OffsetStash) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ key := topicPartition{Topic: topic, Partition: partition}
+ if info := s.offsets[key]; offset >= info.Offset {
+ info.Offset = offset
+ info.Metadata = metadata
+ s.offsets[key] = info
+ }
+}
+
+// Offsets returns the latest stashed offsets by topic-partition
+func (s *OffsetStash) Offsets() map[string]int64 {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ res := make(map[string]int64, len(s.offsets))
+ for tp, info := range s.offsets {
+ res[tp.String()] = info.Offset
+ }
+ return res
+}
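
A sketch of batching acknowledgements through `OffsetStash` and `Consumer.MarkOffsets`; the broker, group, topic and batch size are illustrative.

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-consumer-group", []string{"my_topic"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	stash := cluster.NewOffsetStash()
	batch := 0
	for msg := range consumer.Messages() {
		stash.MarkOffset(msg, "") // accumulate offsets without touching the consumer yet
		batch++
		if batch == 50 {
			// Hand the whole batch over in one call; MarkOffsets drains the stash.
			consumer.MarkOffsets(stash)
			batch = 0
		}
	}
}
```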
diff --git a/vendor/github.com/bsm/sarama-cluster/partitions.go b/vendor/github.com/bsm/sarama-cluster/partitions.go
new file mode 100644
index 00000000..8266a879
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/partitions.go
@@ -0,0 +1,214 @@
+package cluster
+
+import (
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/Shopify/sarama"
+)
+
+type partitionConsumer struct {
+ pcm sarama.PartitionConsumer
+
+ state partitionState
+ mu sync.Mutex
+
+ closed bool
+ dying, dead chan none
+}
+
+func newPartitionConsumer(manager sarama.Consumer, topic string, partition int32, info offsetInfo, defaultOffset int64) (*partitionConsumer, error) {
+ pcm, err := manager.ConsumePartition(topic, partition, info.NextOffset(defaultOffset))
+
+ // Resume from default offset, if requested offset is out-of-range
+ if err == sarama.ErrOffsetOutOfRange {
+ info.Offset = -1
+ pcm, err = manager.ConsumePartition(topic, partition, defaultOffset)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &partitionConsumer{
+ pcm: pcm,
+ state: partitionState{Info: info},
+
+ dying: make(chan none),
+ dead: make(chan none),
+ }, nil
+}
+
+func (c *partitionConsumer) Loop(messages chan<- *sarama.ConsumerMessage, errors chan<- error) {
+ defer close(c.dead)
+
+ for {
+ select {
+ case msg, ok := <-c.pcm.Messages():
+ if !ok {
+ return
+ }
+ select {
+ case messages <- msg:
+ case <-c.dying:
+ return
+ }
+ case err, ok := <-c.pcm.Errors():
+ if !ok {
+ return
+ }
+ select {
+ case errors <- err:
+ case <-c.dying:
+ return
+ }
+ case <-c.dying:
+ return
+ }
+ }
+}
+
+func (c *partitionConsumer) Close() error {
+ if c.closed {
+ return nil
+ }
+
+ err := c.pcm.Close()
+ c.closed = true
+ close(c.dying)
+ <-c.dead
+
+ return err
+}
+
+func (c *partitionConsumer) State() partitionState {
+ if c == nil {
+ return partitionState{}
+ }
+
+ c.mu.Lock()
+ state := c.state
+ c.mu.Unlock()
+
+ return state
+}
+
+func (c *partitionConsumer) MarkCommitted(offset int64) {
+ if c == nil {
+ return
+ }
+
+ c.mu.Lock()
+ if offset == c.state.Info.Offset {
+ c.state.Dirty = false
+ }
+ c.mu.Unlock()
+}
+
+func (c *partitionConsumer) MarkOffset(offset int64, metadata string) {
+ if c == nil {
+ return
+ }
+
+ c.mu.Lock()
+ if offset > c.state.Info.Offset {
+ c.state.Info.Offset = offset
+ c.state.Info.Metadata = metadata
+ c.state.Dirty = true
+ }
+ c.mu.Unlock()
+}
+
+// --------------------------------------------------------------------
+
+type partitionState struct {
+ Info offsetInfo
+ Dirty bool
+ LastCommit time.Time
+}
+
+// --------------------------------------------------------------------
+
+type partitionMap struct {
+ data map[topicPartition]*partitionConsumer
+ mu sync.RWMutex
+}
+
+func newPartitionMap() *partitionMap {
+ return &partitionMap{
+ data: make(map[topicPartition]*partitionConsumer),
+ }
+}
+
+func (m *partitionMap) IsSubscribedTo(topic string) bool {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+
+ for tp := range m.data {
+ if tp.Topic == topic {
+ return true
+ }
+ }
+ return false
+}
+
+func (m *partitionMap) Fetch(topic string, partition int32) *partitionConsumer {
+ m.mu.RLock()
+ pc, _ := m.data[topicPartition{topic, partition}]
+ m.mu.RUnlock()
+ return pc
+}
+
+func (m *partitionMap) Store(topic string, partition int32, pc *partitionConsumer) {
+ m.mu.Lock()
+ m.data[topicPartition{topic, partition}] = pc
+ m.mu.Unlock()
+}
+
+func (m *partitionMap) Snapshot() map[topicPartition]partitionState {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+
+ snap := make(map[topicPartition]partitionState, len(m.data))
+ for tp, pc := range m.data {
+ snap[tp] = pc.State()
+ }
+ return snap
+}
+
+func (m *partitionMap) Stop() {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+
+ var wg sync.WaitGroup
+ for tp := range m.data {
+ wg.Add(1)
+ go func(p *partitionConsumer) {
+ _ = p.Close()
+ wg.Done()
+ }(m.data[tp])
+ }
+ wg.Wait()
+}
+
+func (m *partitionMap) Clear() {
+ m.mu.Lock()
+ for tp := range m.data {
+ delete(m.data, tp)
+ }
+ m.mu.Unlock()
+}
+
+func (m *partitionMap) Info() map[string][]int32 {
+ info := make(map[string][]int32)
+ m.mu.RLock()
+ for tp := range m.data {
+ info[tp.Topic] = append(info[tp.Topic], tp.Partition)
+ }
+ m.mu.RUnlock()
+
+ for topic := range info {
+ sort.Sort(int32Slice(info[topic]))
+ }
+ return info
+}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 00000000..c8364161
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 00000000..8a4a6589
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+ // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+ // internal reflect.Value fields. These values are valid before golang
+ // commit ecccf07e7f9d which changed the format. The are also valid
+ // after commit 82f48826c6c7 which changed the format again to mirror
+ // the original format. Code in the init function updates these offsets
+ // as necessary.
+ offsetPtr = uintptr(ptrSize)
+ offsetScalar = uintptr(0)
+ offsetFlag = uintptr(ptrSize * 2)
+
+ // flagKindWidth and flagKindShift indicate various bits that the
+ // reflect package uses internally to track kind information.
+ //
+ // flagRO indicates whether or not the value field of a reflect.Value is
+ // read-only.
+ //
+ // flagIndir indicates whether the value field of a reflect.Value is
+ // the actual data or a pointer to the data.
+ //
+ // These values are valid before golang commit 90a7c3c86944 which
+ // changed their positions. Code in the init function updates these
+ // flags as necessary.
+ flagKindWidth = uintptr(5)
+ flagKindShift = uintptr(flagKindWidth - 1)
+ flagRO = uintptr(1 << 0)
+ flagIndir = uintptr(1 << 1)
+)
+
+func init() {
+ // Older versions of reflect.Value stored small integers directly in the
+ // ptr field (which is named val in the older versions). Versions
+ // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+ // scalar for this purpose which unfortunately came before the flag
+ // field, so the offset of the flag field is different for those
+ // versions.
+ //
+ // This code constructs a new reflect.Value from a known small integer
+ // and checks if the size of the reflect.Value struct indicates it has
+ // the scalar field. When it does, the offsets are updated accordingly.
+ vv := reflect.ValueOf(0xf00)
+ if unsafe.Sizeof(vv) == (ptrSize * 4) {
+ offsetScalar = ptrSize * 2
+ offsetFlag = ptrSize * 3
+ }
+
+ // Commit 90a7c3c86944 changed the flag positions such that the low
+ // order bits are the kind. This code extracts the kind from the flags
+ // field and ensures it's the correct type. When it's not, the flag
+ // order has been changed to the newer format, so the flags are updated
+ // accordingly.
+ upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+ upfv := *(*uintptr)(upf)
+ flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+ if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+ flagKindShift = 0
+ flagRO = 1 << 5
+ flagIndir = 1 << 6
+
+ // Commit adf9b30e5594 modified the flags to separate the
+ // flagRO flag into two bits which specifies whether or not the
+ // field is embedded. This causes flagIndir to move over a bit
+ // and means that flagRO is the combination of either of the
+ // original flagRO bit and the new bit.
+ //
+ // This code detects the change by extracting what used to be
+ // the indirect bit to ensure it's set. When it's not, the flag
+ // order has been changed to the newer format, so the flags are
+ // updated accordingly.
+ if upfv&flagIndir == 0 {
+ flagRO = 3 << 5
+ flagIndir = 1 << 7
+ }
+ }
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+ indirects := 1
+ vt := v.Type()
+ upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+ rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+ if rvf&flagIndir != 0 {
+ vt = reflect.PtrTo(v.Type())
+ indirects++
+ } else if offsetScalar != 0 {
+ // The value is in the scalar field when it's not one of the
+ // reference types.
+ switch vt.Kind() {
+ case reflect.Uintptr:
+ case reflect.Chan:
+ case reflect.Func:
+ case reflect.Map:
+ case reflect.Ptr:
+ case reflect.UnsafePointer:
+ default:
+ upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+ offsetScalar)
+ }
+ }
+
+ pv := reflect.NewAt(vt, upv)
+ rv = pv
+ for i := 0; i < indirects; i++ {
+ rv = rv.Elem()
+ }
+ return rv
+}
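
The point of this unsafe bypass is that spew can still call `Stringer`/`error` methods on values reached through unexported struct fields, which plain reflection refuses to hand out as interfaces. A minimal, hedged sketch of that behaviour (the types and names are invented; the import path is the upstream one rather than this repo's vendor path):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// secret implements fmt.Stringer but is only ever reached through an
// unexported field, so CanInterface() reports false for it under reflection.
type secret struct{ n int }

func (s secret) String() string { return fmt.Sprintf("secret(%d)", s.n) }

type wrapper struct {
	hidden secret // unexported field
}

func main() {
	// Built normally (without "-tags safe"), spew uses unsafeReflectValue to
	// reach the unexported field and invoke its Stringer; with the safe build
	// it falls back to dumping the raw struct fields instead.
	spew.Dump(wrapper{hidden: secret{n: 7}})
}
```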
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 00000000..1fe3cf3d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 00000000..1be8ce94
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+ // mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valueSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
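
`sortValues` and `valuesSorter` are what make map output deterministic when key sorting is requested. A small runnable sketch showing the effect via the public API (the map contents are arbitrary; the import path is the upstream one rather than this repo's vendor path):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	scores := map[string]int{"beta": 2, "alpha": 1, "gamma": 3}

	// SortKeys routes the map keys through sortValues, so the dump order is
	// stable across runs - handy for diffing output in tests.
	cs := spew.ConfigState{Indent: " ", SortKeys: true}
	fmt.Print(cs.Sdump(scores))
}
```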
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 00000000..2e3d22f3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
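
Since the ConfigState methods mirror the top-level functions, a private instance is the usual way to tweak formatting without touching the global `spew.Config`. A hedged sketch (the `version` struct is invented purely for illustration; the import path is the upstream one rather than this repo's vendor path):

```go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

// version is a made-up type standing in for whatever you want to dump.
type version struct {
	ID    string
	State string
}

func main() {
	// NewDefaultConfig gives the documented defaults; the two Disable flags
	// strip pointer addresses and cap= annotations so output diffs cleanly.
	cs := spew.NewDefaultConfig()
	cs.DisablePointerAddresses = true
	cs.DisableCapacities = true

	cs.Fdump(os.Stdout, version{ID: "abc123", State: "published"})
}
```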
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 00000000..aacaac6f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
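
To tie the two documented styles together, here is a short runnable sketch combining Dump-style output with the custom formatter verbs (the `edition` struct is invented; the import path is the upstream one rather than this repo's vendor path):

```go
package main

import "github.com/davecgh/go-spew/spew"

// edition is a made-up type used only to have something to print.
type edition struct {
	Name  string
	Pages *int
}

func main() {
	n := 42
	e := edition{Name: "time-series", Pages: &n}

	// Dump style: multi-line, typed, pointers dereferenced and followed.
	spew.Dump(e)

	// Formatter style: inline, driven by the standard fmt verbs.
	spew.Printf("compact: %v  with addresses: %+v  with types: %#v\n", e, e, e)
}
```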
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 00000000..df1d582a
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound == true:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound == true:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
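
`fdump` is the common path behind Dump, Fdump and Sdump, and byte slices go through `dumpSlice`'s hexdump branch. A small sketch exercising that via the public API (the payload contents are arbitrary; the import path is the upstream one rather than this repo's vendor path):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	payload := []byte("dp-dataset-api")

	// Fdump writes to any io.Writer; byte slices come out hexdump -C style,
	// with offsets, hex values and an ASCII column.
	var buf bytes.Buffer
	spew.Fdump(&buf, payload)
	fmt.Print(buf.String())

	// Sdump is the same output returned as a string.
	_ = spew.Sdump(payload)
}
```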
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 00000000..c49875ba
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound == true:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound == true:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
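+
+// A minimal usage sketch, assuming a hypothetical value v: the formatter can
+// be handed straight to the standard fmt verbs described above, which is also
+// what the convenience wrappers such as Printf do for each argument.
+//
+//	fmt.Printf("%#+v\n", spew.NewFormatter(v))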
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 00000000..32c0e338
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE
new file mode 100644
index 00000000..698a3f51
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md
new file mode 100644
index 00000000..2d1b3d93
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md
@@ -0,0 +1,34 @@
+circuit-breaker
+===============
+
+[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency)
+[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker)
+[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
+
+The circuit-breaker resiliency pattern for golang.
+
+Creating a breaker takes three parameters:
+- error threshold (for opening the breaker)
+- success threshold (for closing the breaker)
+- timeout (how long to keep the breaker open)
+
+```go
+b := breaker.New(3, 1, 5*time.Second)
+
+for {
+ result := b.Run(func() error {
+ // communicate with some external service and
+ // return an error if the communication failed
+ return nil
+ })
+
+ switch result {
+ case nil:
+ // success!
+ case breaker.ErrBreakerOpen:
+ // our function wasn't run because the breaker was open
+ default:
+ // some other error
+ }
+}
+```
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
new file mode 100644
index 00000000..f88ca724
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
@@ -0,0 +1,161 @@
+// Package breaker implements the circuit-breaker resiliency pattern for Go.
+package breaker
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ErrBreakerOpen is the error returned from Run() when the function is not executed
+// because the breaker is currently open.
+var ErrBreakerOpen = errors.New("circuit breaker is open")
+
+const (
+ closed uint32 = iota
+ open
+ halfOpen
+)
+
+// Breaker implements the circuit-breaker resiliency pattern
+type Breaker struct {
+ errorThreshold, successThreshold int
+ timeout time.Duration
+
+ lock sync.Mutex
+ state uint32
+ errors, successes int
+ lastError time.Time
+}
+
+// New constructs a new circuit-breaker that starts closed.
+// From closed, the breaker opens if "errorThreshold" errors are seen
+// without an error-free period of at least "timeout". From open, the
+// breaker half-closes after "timeout". From half-open, the breaker closes
+// after "successThreshold" consecutive successes, or opens on a single error.
+func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
+ return &Breaker{
+ errorThreshold: errorThreshold,
+ successThreshold: successThreshold,
+ timeout: timeout,
+ }
+}
+
+// Run will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function and pass along its return
+// value. It is safe to call Run concurrently on the same Breaker.
+func (b *Breaker) Run(work func() error) error {
+ state := atomic.LoadUint32(&b.state)
+
+ if state == open {
+ return ErrBreakerOpen
+ }
+
+ return b.doWork(state, work)
+}
+
+// Go will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function in a separate goroutine.
+// If the function is run, Go will return nil immediately, and will *not* return
+// the return value of the function. It is safe to call Go concurrently on the
+// same Breaker.
+func (b *Breaker) Go(work func() error) error {
+ state := atomic.LoadUint32(&b.state)
+
+ if state == open {
+ return ErrBreakerOpen
+ }
+
+ // errcheck complains about ignoring the error return value, but
+ // that's on purpose; if you want an error from a goroutine you have to
+ // get it over a channel or something
+ go b.doWork(state, work)
+
+ return nil
+}
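+
+// A minimal sketch of observing the error from work started via Go, assuming
+// b is a *Breaker from New and doWork is a hypothetical function: because Go
+// discards the function's return value, the closure reports it over a channel.
+//
+//	errs := make(chan error, 1)
+//	_ = b.Go(func() error {
+//		err := doWork()
+//		errs <- err
+//		return err
+//	})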
+
+func (b *Breaker) doWork(state uint32, work func() error) error {
+ var panicValue interface{}
+
+ result := func() error {
+ defer func() {
+ panicValue = recover()
+ }()
+ return work()
+ }()
+
+ if result == nil && panicValue == nil && state == closed {
+ // short-circuit the normal, success path without contending
+ // on the lock
+ return nil
+ }
+
+ // oh well, I guess we have to contend on the lock
+ b.processResult(result, panicValue)
+
+ if panicValue != nil {
+ // as close as Go lets us come to a "rethrow" although unfortunately
+	// we lose the original panicking location
+ panic(panicValue)
+ }
+
+ return result
+}
+
+func (b *Breaker) processResult(result error, panicValue interface{}) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if result == nil && panicValue == nil {
+ if b.state == halfOpen {
+ b.successes++
+ if b.successes == b.successThreshold {
+ b.closeBreaker()
+ }
+ }
+ } else {
+ if b.errors > 0 {
+ expiry := b.lastError.Add(b.timeout)
+ if time.Now().After(expiry) {
+ b.errors = 0
+ }
+ }
+
+ switch b.state {
+ case closed:
+ b.errors++
+ if b.errors == b.errorThreshold {
+ b.openBreaker()
+ } else {
+ b.lastError = time.Now()
+ }
+ case halfOpen:
+ b.openBreaker()
+ }
+ }
+}
+
+func (b *Breaker) openBreaker() {
+ b.changeState(open)
+ go b.timer()
+}
+
+func (b *Breaker) closeBreaker() {
+ b.changeState(closed)
+}
+
+func (b *Breaker) timer() {
+ time.Sleep(b.timeout)
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ b.changeState(halfOpen)
+}
+
+func (b *Breaker) changeState(newState uint32) {
+ b.errors = 0
+ b.successes = 0
+ atomic.StoreUint32(&b.state, newState)
+}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
new file mode 100644
index 00000000..5bf3688d
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md
new file mode 100644
index 00000000..3f2695c7
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/README.md
@@ -0,0 +1,13 @@
+# go-xerial-snappy
+
+[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy)
+
+Xerial-compatible Snappy framing support for golang.
+
+Packages using Xerial for snappy encoding use a framing format incompatible with
+basically everything else in existence. This package wraps the github.com/golang/snappy
+package to support it.
+
+Apps that use this format include Apache Kafka (see
+https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for
+details).
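+
+A minimal usage sketch (the payload here is only illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	snappy "github.com/eapache/go-xerial-snappy"
+)
+
+func main() {
+	// Encode produces plain snappy data with no framing header.
+	encoded := snappy.Encode([]byte("hello, xerial snappy"))
+
+	// Decode accepts both plain snappy data and the Xerial framing format.
+	decoded, err := snappy.Decode(encoded)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(decoded))
+}
+```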
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
new file mode 100644
index 00000000..b8f8b51f
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -0,0 +1,43 @@
+package snappy
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ master "github.com/golang/snappy"
+)
+
+var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}
+
+// Encode encodes data as snappy with no framing header.
+func Encode(src []byte) []byte {
+ return master.Encode(nil, src)
+}
+
+// Decode decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format.
+func Decode(src []byte) ([]byte, error) {
+ if !bytes.Equal(src[:8], xerialHeader) {
+ return master.Decode(nil, src)
+ }
+
+ var (
+ pos = uint32(16)
+ max = uint32(len(src))
+ dst = make([]byte, 0, len(src))
+ chunk []byte
+ err error
+ )
+ for pos < max {
+ size := binary.BigEndian.Uint32(src[pos : pos+4])
+ pos += 4
+
+ chunk, err = master.Decode(chunk, src[pos:pos+size])
+ if err != nil {
+ return nil, err
+ }
+ pos += size
+ dst = append(dst, chunk...)
+ }
+ return dst, nil
+}
diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE
new file mode 100644
index 00000000..d5f36dbc
--- /dev/null
+++ b/vendor/github.com/eapache/queue/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md
new file mode 100644
index 00000000..8e782335
--- /dev/null
+++ b/vendor/github.com/eapache/queue/README.md
@@ -0,0 +1,16 @@
+Queue
+=====
+
+[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue)
+[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue)
+[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
+
+A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is in part because it is *not* thread-safe.
+
+Follows semantic versioning using https://gopkg.in/ - import from
+[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1)
+for guaranteed API stability.
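+
+A minimal usage sketch, using the import path as vendored here:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/eapache/queue"
+)
+
+func main() {
+	q := queue.New()
+	q.Add("a")
+	q.Add("b")
+
+	fmt.Println(q.Length()) // 2
+	fmt.Println(q.Peek())   // a (the element at the head, not removed)
+	fmt.Println(q.Remove()) // a (removed from the head)
+	fmt.Println(q.Get(-1))  // b (negative indices count back from the tail)
+}
+```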
diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go
new file mode 100644
index 00000000..71d1acdf
--- /dev/null
+++ b/vendor/github.com/eapache/queue/queue.go
@@ -0,0 +1,102 @@
+/*
+Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
+*/
+package queue
+
+// minQueueLen is the smallest capacity that the queue may have.
+// Must be power of 2 for bitwise modulus: x % n == x & (n - 1).
+const minQueueLen = 16
+
+// Queue represents a single instance of the queue data structure.
+type Queue struct {
+ buf []interface{}
+ head, tail, count int
+}
+
+// New constructs and returns a new Queue.
+func New() *Queue {
+ return &Queue{
+ buf: make([]interface{}, minQueueLen),
+ }
+}
+
+// Length returns the number of elements currently stored in the queue.
+func (q *Queue) Length() int {
+ return q.count
+}
+
+// resizes the queue to fit exactly twice its current contents.
+// This can result in shrinking if the queue is less than half-full.
+func (q *Queue) resize() {
+ newBuf := make([]interface{}, q.count<<1)
+
+ if q.tail > q.head {
+ copy(newBuf, q.buf[q.head:q.tail])
+ } else {
+ n := copy(newBuf, q.buf[q.head:])
+ copy(newBuf[n:], q.buf[:q.tail])
+ }
+
+ q.head = 0
+ q.tail = q.count
+ q.buf = newBuf
+}
+
+// Add puts an element on the end of the queue.
+func (q *Queue) Add(elem interface{}) {
+ if q.count == len(q.buf) {
+ q.resize()
+ }
+
+ q.buf[q.tail] = elem
+ // bitwise modulus
+ q.tail = (q.tail + 1) & (len(q.buf) - 1)
+ q.count++
+}
+
+// Peek returns the element at the head of the queue. This call panics
+// if the queue is empty.
+func (q *Queue) Peek() interface{} {
+ if q.count <= 0 {
+ panic("queue: Peek() called on empty queue")
+ }
+ return q.buf[q.head]
+}
+
+// Get returns the element at index i in the queue. If the index is
+// invalid, the call will panic. This method accepts both positive and
+// negative index values. Index 0 refers to the first element, and
+// index -1 refers to the last.
+func (q *Queue) Get(i int) interface{} {
+ // If indexing backwards, convert to positive index.
+ if i < 0 {
+ i += q.count
+ }
+ if i < 0 || i >= q.count {
+ panic("queue: Get() called with index out of range")
+ }
+ // bitwise modulus
+ return q.buf[(q.head+i)&(len(q.buf)-1)]
+}
+
+// Remove removes and returns the element from the front of the queue. If the
+// queue is empty, the call will panic.
+func (q *Queue) Remove() interface{} {
+ if q.count <= 0 {
+ panic("queue: Remove() called on empty queue")
+ }
+ ret := q.buf[q.head]
+ q.buf[q.head] = nil
+ // bitwise modulus
+ q.head = (q.head + 1) & (len(q.buf) - 1)
+ q.count--
+	// Resize down if the buffer is only 1/4 full.
+ if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) {
+ q.resize()
+ }
+ return ret
+}
diff --git a/vendor/github.com/go-avro/avro/LICENSE b/vendor/github.com/go-avro/avro/LICENSE
new file mode 100644
index 00000000..5c304d1a
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-avro/avro/README.md b/vendor/github.com/go-avro/avro/README.md
new file mode 100644
index 00000000..8710d755
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/README.md
@@ -0,0 +1,26 @@
+Apache Avro for Golang
+=====================
+(forked from `elodina/go-avro`)
+
+[![Build Status](https://travis-ci.org/go-avro/avro.svg?branch=master)](https://travis-ci.org/go-avro/avro)
+
+About This fork
+---------------
+
+This fork separated from elodina/go-avro in December 2016 because the project had not been responding to PRs since around May 2016. I tried to contact them to get maintainer access, but the original maintainer is no longer able to make those changes, so I have forked the project for now. If elodina/go-avro returns, they are free to merge all the changes I have made back.
+
+
+Documentation
+-------------
+
+Installation is as easy as follows:
+
+`go get gopkg.in/avro.v0`
+
+Some usage examples are located in [examples folder](https://github.com/go-avro/avro/tree/master/examples):
+
+* [DataFileReader](https://github.com/go-avro/avro/blob/master/examples/data_file/data_file.go)
+* [GenericDatumReader/Writer](https://github.com/go-avro/avro/blob/master/examples/generic_datum/generic_datum.go)
+* [SpecificDatumReader/Writer](https://github.com/go-avro/avro/blob/master/examples/specific_datum/specific_datum.go)
+* [Schema loading](https://github.com/go-avro/avro/blob/master/examples/load_schema/load_schema.go)
+* Code gen support available in [codegen folder](https://github.com/go-avro/avro/tree/master/codegen)
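+
+A minimal sketch of parsing a schema (assuming the `gopkg.in/avro.v0` import path from the installation step above):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	avro "gopkg.in/avro.v0"
+)
+
+func main() {
+	schema, err := avro.ParseSchema(`{
+		"type": "record",
+		"name": "Example",
+		"fields": [{"name": "value", "type": "string"}]
+	}`)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(schema.GetName()) // Example
+}
+```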
diff --git a/vendor/github.com/go-avro/avro/codegen.go b/vendor/github.com/go-avro/avro/codegen.go
new file mode 100644
index 00000000..08dada4f
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/codegen.go
@@ -0,0 +1,622 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+package avro
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "go/format"
+ "strings"
+)
+
+// CodeGenerator is a code generation tool for structs from given Avro schemas.
+type CodeGenerator struct {
+ rawSchemas []string
+
+ structs map[string]*bytes.Buffer
+ codeSnippets []*bytes.Buffer
+ schemaDefinitions *bytes.Buffer
+}
+
+// NewCodeGenerator creates a new CodeGenerator for given Avro schemas.
+func NewCodeGenerator(schemas []string) *CodeGenerator {
+ return &CodeGenerator{
+ rawSchemas: schemas,
+ structs: make(map[string]*bytes.Buffer),
+ codeSnippets: make([]*bytes.Buffer, 0),
+ schemaDefinitions: &bytes.Buffer{},
+ }
+}
+
+type recordSchemaInfo struct {
+ schema *RecordSchema
+ typeName string
+ schemaVarName string
+ schemaErrName string
+}
+
+func newRecordSchemaInfo(schema *RecordSchema) (*recordSchemaInfo, error) {
+ if schema.Name == "" {
+ return nil, errors.New("Name not set.")
+ }
+
+ typeName := fmt.Sprintf("%s%s", strings.ToUpper(schema.Name[:1]), schema.Name[1:])
+
+ return &recordSchemaInfo{
+ schema: schema,
+ typeName: typeName,
+ schemaVarName: fmt.Sprintf("_%s_schema", typeName),
+ schemaErrName: fmt.Sprintf("_%s_schema_err", typeName),
+ }, nil
+}
+
+type enumSchemaInfo struct {
+ schema *EnumSchema
+ typeName string
+}
+
+func newEnumSchemaInfo(schema *EnumSchema) (*enumSchemaInfo, error) {
+ if schema.Name == "" {
+ return nil, errors.New("Name not set.")
+ }
+
+ return &enumSchemaInfo{
+ schema: schema,
+ typeName: fmt.Sprintf("%s%s", strings.ToUpper(schema.Name[:1]), schema.Name[1:]),
+ }, nil
+}
+
+// Generate generates source code for Avro schemas specified on creation.
+// The output is Go-formatted source code that contains struct definitions for all given schemas.
+// It may return an error if code generation fails, e.g. due to an unparsable schema.
+func (codegen *CodeGenerator) Generate() (string, error) {
+ for index, rawSchema := range codegen.rawSchemas {
+ parsedSchema, err := ParseSchema(rawSchema)
+ if err != nil {
+ return "", err
+ }
+
+ schema, ok := parsedSchema.(*RecordSchema)
+ if !ok {
+ return "", errors.New("Not a Record schema.")
+ }
+ schemaInfo, err := newRecordSchemaInfo(schema)
+ if err != nil {
+ return "", err
+ }
+
+ buffer := &bytes.Buffer{}
+ codegen.codeSnippets = append(codegen.codeSnippets, buffer)
+
+ // write package and import only once
+ if index == 0 {
+ err = codegen.writePackageName(schemaInfo)
+ if err != nil {
+ return "", err
+ }
+
+ err = codegen.writeImportStatement()
+ if err != nil {
+ return "", err
+ }
+ }
+
+ err = codegen.writeStruct(schemaInfo)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ formatted, err := format.Source([]byte(codegen.collectResult()))
+ if err != nil {
+ return "", err
+ }
+
+ return string(formatted), nil
+}
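+
+// A minimal usage sketch, where rawSchema is a placeholder for a record
+// schema definition in JSON:
+//
+//	gen := NewCodeGenerator([]string{rawSchema})
+//	code, err := gen.Generate()
+//	if err != nil {
+//		// handle the schema/codegen error
+//	}
+//	// code now holds formatted Go source with one struct per record schema.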
+
+func (codegen *CodeGenerator) collectResult() string {
+ results := make([]string, len(codegen.codeSnippets)+1)
+ for i, snippet := range codegen.codeSnippets {
+ results[i] = snippet.String()
+ }
+ results[len(results)-1] = codegen.schemaDefinitions.String()
+
+ return strings.Join(results, "\n")
+}
+
+func (codegen *CodeGenerator) writePackageName(info *recordSchemaInfo) error {
+ buffer := codegen.codeSnippets[0]
+ _, err := buffer.WriteString("package ")
+ if err != nil {
+ return err
+ }
+
+ if info.schema.Namespace == "" {
+ info.schema.Namespace = "avro"
+ }
+
+ packages := strings.Split(info.schema.Namespace, ".")
+ _, err = buffer.WriteString(fmt.Sprintf("%s\n\n", packages[len(packages)-1]))
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (codegen *CodeGenerator) writeStruct(info *recordSchemaInfo) error {
+ buffer := &bytes.Buffer{}
+ if _, exists := codegen.structs[info.typeName]; exists {
+ return nil
+ }
+
+ codegen.codeSnippets = append(codegen.codeSnippets, buffer)
+ codegen.structs[info.typeName] = buffer
+
+ err := codegen.writeStructSchemaVar(info)
+ if err != nil {
+ return err
+ }
+
+ err = codegen.writeDoc("", info.schema.Doc, buffer)
+ if err != nil {
+ return err
+ }
+
+ err = codegen.writeStructDefinition(info, buffer)
+ if err != nil {
+ return err
+ }
+
+ _, err = buffer.WriteString("\n\n")
+ if err != nil {
+ return err
+ }
+
+ err = codegen.writeStructConstructor(info, buffer)
+ if err != nil {
+ return err
+ }
+
+ _, err = buffer.WriteString("\n\n")
+ if err != nil {
+ return err
+ }
+
+ return codegen.writeSchemaGetter(info, buffer)
+}
+
+func (codegen *CodeGenerator) writeEnum(info *enumSchemaInfo) error {
+ buffer := &bytes.Buffer{}
+ if _, exists := codegen.structs[info.typeName]; exists {
+ return nil
+ }
+
+ codegen.codeSnippets = append(codegen.codeSnippets, buffer)
+ codegen.structs[info.typeName] = buffer
+
+ err := codegen.writeEnumConstants(info, buffer)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (codegen *CodeGenerator) writeEnumConstants(info *enumSchemaInfo, buffer *bytes.Buffer) error {
+ if len(info.schema.Symbols) == 0 {
+ return nil
+ }
+
+ _, err := buffer.WriteString(fmt.Sprintf("// Enum values for %s\n", info.typeName))
+ if err != nil {
+ return err
+ }
+
+ _, err = buffer.WriteString("const (")
+ if err != nil {
+ return err
+ }
+
+ for index, symbol := range info.schema.Symbols {
+ _, err = buffer.WriteString(fmt.Sprintf("%s_%s int32 = %d\n", info.typeName, symbol, index))
+ if err != nil {
+ return err
+ }
+ }
+ _, err = buffer.WriteString(")")
+ return err
+}
+
+func (codegen *CodeGenerator) writeImportStatement() error {
+ buffer := codegen.codeSnippets[0]
+ _, err := buffer.WriteString(`import "github.com/elodina/go-avro"`)
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString("\n")
+ return err
+}
+
+func (codegen *CodeGenerator) writeStructSchemaVar(info *recordSchemaInfo) error {
+ buffer := codegen.schemaDefinitions
+ _, err := buffer.WriteString("// Generated by codegen. Please do not modify.\n")
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString(fmt.Sprintf("var %s, %s = avro.ParseSchema(`%s`)\n\n", info.schemaVarName, info.schemaErrName, strings.Replace(info.schema.String(), "`", "'", -1)))
+ return err
+}
+
+func (codegen *CodeGenerator) writeDoc(prefix string, doc string, buffer *bytes.Buffer) error {
+ if doc == "" {
+ return nil
+ }
+
+ _, err := buffer.WriteString(fmt.Sprintf("%s/* %s */\n", prefix, doc))
+ return err
+}
+
+func (codegen *CodeGenerator) writeStructDefinition(info *recordSchemaInfo, buffer *bytes.Buffer) error {
+ _, err := buffer.WriteString(fmt.Sprintf("type %s struct {\n", info.typeName))
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < len(info.schema.Fields); i++ {
+ err := codegen.writeStructField(info.schema.Fields[i], buffer)
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err = buffer.WriteString("}")
+ return err
+}
+
+func (codegen *CodeGenerator) writeStructField(field *SchemaField, buffer *bytes.Buffer) error {
+ err := codegen.writeDoc("\t", field.Doc, buffer)
+ if err != nil {
+ return err
+ }
+ if field.Name == "" {
+ return errors.New("Empty field name.")
+ }
+
+ _, err = buffer.WriteString(fmt.Sprintf("\t%s%s ", strings.ToUpper(field.Name[:1]), field.Name[1:]))
+ if err != nil {
+ return err
+ }
+
+ err = codegen.writeStructFieldType(field.Type, buffer)
+ if err != nil {
+ return err
+ }
+
+ _, err = buffer.WriteString("\n")
+ return err
+}
+
+func (codegen *CodeGenerator) writeStructFieldType(schema Schema, buffer *bytes.Buffer) error {
+ var err error
+ switch schema.Type() {
+ case Null:
+ _, err = buffer.WriteString("interface{}")
+ case Boolean:
+ _, err = buffer.WriteString("bool")
+ case String:
+ _, err = buffer.WriteString("string")
+ case Int:
+ _, err = buffer.WriteString("int32")
+ case Long:
+ _, err = buffer.WriteString("int64")
+ case Float:
+ _, err = buffer.WriteString("float32")
+ case Double:
+ _, err = buffer.WriteString("float64")
+ case Bytes:
+ _, err = buffer.WriteString("[]byte")
+ case Array:
+ {
+ _, err = buffer.WriteString("[]")
+ if err != nil {
+ return err
+ }
+ err = codegen.writeStructFieldType(schema.(*ArraySchema).Items, buffer)
+ }
+ case Map:
+ {
+ _, err = buffer.WriteString("map[string]")
+ if err != nil {
+ return err
+ }
+ err = codegen.writeStructFieldType(schema.(*MapSchema).Values, buffer)
+ }
+ case Enum:
+ {
+ enumSchema := schema.(*EnumSchema)
+ info, err := newEnumSchemaInfo(enumSchema)
+ if err != nil {
+ return err
+ }
+
+ _, err = buffer.WriteString("*avro.GenericEnum")
+ if err != nil {
+ return err
+ }
+
+ return codegen.writeEnum(info)
+ }
+ case Union:
+ {
+ err = codegen.writeStructUnionType(schema.(*UnionSchema), buffer)
+ }
+ case Fixed:
+ _, err = buffer.WriteString("[]byte")
+ case Record:
+ {
+ _, err = buffer.WriteString("*")
+ if err != nil {
+ return err
+ }
+ recordSchema := schema.(*RecordSchema)
+
+ schemaInfo, err := newRecordSchemaInfo(recordSchema)
+ if err != nil {
+ return err
+ }
+
+ _, err = buffer.WriteString(schemaInfo.typeName)
+ if err != nil {
+ return err
+ }
+
+ return codegen.writeStruct(schemaInfo)
+ }
+ case Recursive:
+ {
+ _, err = buffer.WriteString("*")
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString(schema.(*RecursiveSchema).GetName())
+ }
+ }
+
+ return err
+}
+
+func (codegen *CodeGenerator) writeStructUnionType(schema *UnionSchema, buffer *bytes.Buffer) error {
+ var unionType Schema
+ if schema.Types[0].Type() == Null {
+ unionType = schema.Types[1]
+ } else if schema.Types[1].Type() == Null {
+ unionType = schema.Types[0]
+ }
+
+ if unionType != nil && codegen.isNullable(unionType) {
+ return codegen.writeStructFieldType(unionType, buffer)
+ }
+
+ _, err := buffer.WriteString("interface{}")
+ return err
+}
+
+func (codegen *CodeGenerator) isNullable(schema Schema) bool {
+ switch schema.(type) {
+ case *BooleanSchema, *IntSchema, *LongSchema, *FloatSchema, *DoubleSchema, *StringSchema:
+ return false
+ default:
+ return true
+ }
+}
+
+func (codegen *CodeGenerator) writeStructConstructor(info *recordSchemaInfo, buffer *bytes.Buffer) error {
+ _, err := buffer.WriteString(fmt.Sprintf("func New%s() *%s {\n\treturn &%s{\n", info.typeName, info.typeName, info.typeName))
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < len(info.schema.Fields); i++ {
+ err = codegen.writeStructConstructorField(info, info.schema.Fields[i], buffer)
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err = buffer.WriteString("\t}\n}")
+ return err
+}
+
+func (codegen *CodeGenerator) writeStructConstructorField(info *recordSchemaInfo, field *SchemaField, buffer *bytes.Buffer) error {
+ if !codegen.needWriteField(field) {
+ return nil
+ }
+
+ err := codegen.writeStructConstructorFieldName(field, buffer)
+ if err != nil {
+ return err
+ }
+ err = codegen.writeStructConstructorFieldValue(info, field, buffer)
+ if err != nil {
+ return err
+ }
+
+ _, err = buffer.WriteString(",\n")
+ return err
+}
+
+func (codegen *CodeGenerator) writeStructConstructorFieldValue(info *recordSchemaInfo, field *SchemaField, buffer *bytes.Buffer) error {
+ var err error
+ switch field.Type.(type) {
+ case *NullSchema:
+ _, err = buffer.WriteString("nil")
+ case *BooleanSchema:
+ _, err = buffer.WriteString(fmt.Sprintf("%t", field.Default))
+ case *StringSchema:
+ {
+ _, err = buffer.WriteString(`"`)
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString(fmt.Sprintf("%s", field.Default))
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString(`"`)
+ }
+ case *IntSchema:
+ {
+ defaultValue, ok := field.Default.(float64)
+ if !ok {
+ return fmt.Errorf("Invalid default value for %s field of type %s", field.Name, field.Type.GetName())
+ }
+ _, err = buffer.WriteString(fmt.Sprintf("int32(%d)", int32(defaultValue)))
+ }
+ case *LongSchema:
+ {
+ defaultValue, ok := field.Default.(float64)
+ if !ok {
+ return fmt.Errorf("Invalid default value for %s field of type %s", field.Name, field.Type.GetName())
+ }
+ _, err = buffer.WriteString(fmt.Sprintf("int64(%d)", int64(defaultValue)))
+ }
+ case *FloatSchema:
+ {
+ defaultValue, ok := field.Default.(float64)
+ if !ok {
+ return fmt.Errorf("Invalid default value for %s field of type %s", field.Name, field.Type.GetName())
+ }
+ _, err = buffer.WriteString(fmt.Sprintf("float32(%f)", float32(defaultValue)))
+ }
+ case *DoubleSchema:
+ {
+ defaultValue, ok := field.Default.(float64)
+ if !ok {
+ return fmt.Errorf("Invalid default value for %s field of type %s", field.Name, field.Type.GetName())
+ }
+ _, err = buffer.WriteString(fmt.Sprintf("float64(%f)", defaultValue))
+ }
+ case *BytesSchema:
+ _, err = buffer.WriteString("[]byte{}")
+ case *ArraySchema:
+ {
+ _, err = buffer.WriteString("make(")
+ if err != nil {
+ return err
+ }
+ err = codegen.writeStructFieldType(field.Type, buffer)
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString(", 0)")
+ }
+ case *MapSchema:
+ {
+ _, err = buffer.WriteString("make(")
+ if err != nil {
+ return err
+ }
+ err = codegen.writeStructFieldType(field.Type, buffer)
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString(")")
+ }
+ case *EnumSchema:
+ {
+ _, err = buffer.WriteString("avro.NewGenericEnum([]string{")
+ if err != nil {
+ return err
+ }
+ enum := field.Type.(*EnumSchema)
+ for _, symbol := range enum.Symbols {
+ _, err = buffer.WriteString(fmt.Sprintf(`"%s",`, symbol))
+ if err != nil {
+ return err
+ }
+ }
+ _, err = buffer.WriteString("})")
+ }
+ case *UnionSchema:
+ {
+ union := field.Type.(*UnionSchema)
+ unionField := &SchemaField{}
+ *unionField = *field
+ unionField.Type = union.Types[0]
+ return codegen.writeStructConstructorFieldValue(info, unionField, buffer)
+ }
+ case *FixedSchema:
+ {
+ _, err = buffer.WriteString(fmt.Sprintf("make([]byte, %d)", field.Type.(*FixedSchema).Size))
+ }
+ case *RecordSchema:
+ {
+ info, err := newRecordSchemaInfo(field.Type.(*RecordSchema))
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString(fmt.Sprintf("New%s()", info.typeName))
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return err
+}
+
+func (codegen *CodeGenerator) needWriteField(field *SchemaField) bool {
+ if field.Default != nil {
+ return true
+ }
+
+ switch field.Type.(type) {
+ case *BytesSchema, *ArraySchema, *MapSchema, *EnumSchema, *FixedSchema, *RecordSchema:
+ return true
+ }
+
+ return false
+}
+
+func (codegen *CodeGenerator) writeStructConstructorFieldName(field *SchemaField, buffer *bytes.Buffer) error {
+ _, err := buffer.WriteString("\t\t")
+ if err != nil {
+ return err
+ }
+ fieldName := fmt.Sprintf("%s%s", strings.ToUpper(field.Name[:1]), field.Name[1:])
+ _, err = buffer.WriteString(fieldName)
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString(": ")
+ return err
+}
+
+func (codegen *CodeGenerator) writeSchemaGetter(info *recordSchemaInfo, buffer *bytes.Buffer) error {
+ _, err := buffer.WriteString(fmt.Sprintf("func (o *%s) Schema() avro.Schema {\n\t", info.typeName))
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString(fmt.Sprintf("if %s != nil {\n\t\tpanic(%s)\n\t}\n\t", info.schemaErrName, info.schemaErrName))
+ if err != nil {
+ return err
+ }
+ _, err = buffer.WriteString(fmt.Sprintf("return %s\n}", info.schemaVarName))
+ return err
+}
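+
+// Illustrative sketch of the output produced by the constructor and schema-getter
+// writers above, assuming a record type named Foo with a single long field "count"
+// defaulting to 0 (the names Foo, Count and the schema variable/error names are
+// hypothetical examples, not emitted verbatim by this file):
+//
+//	func NewFoo() *Foo {
+//		return &Foo{
+//			Count: int64(0),
+//		}
+//	}
+//
+//	func (o *Foo) Schema() avro.Schema {
+//		if _Foo_schema_err != nil {
+//			panic(_Foo_schema_err)
+//		}
+//		return _Foo_schema
+//	}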
diff --git a/vendor/github.com/go-avro/avro/data_file.go b/vendor/github.com/go-avro/avro/data_file.go
new file mode 100644
index 00000000..df3d8884
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/data_file.go
@@ -0,0 +1,301 @@
+package avro
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+)
+
+// Support for decoding and encoding the Avro Object Container File format.
+// Spec: http://avro.apache.org/docs/1.7.7/spec.html#Object+Container+Files
+
+const objHeaderSchemaRaw = `{"type": "record", "name": "org.apache.avro.file.Header",
+ "fields" : [
+ {"name": "magic", "type": {"type": "fixed", "name": "Magic", "size": 4}},
+ {"name": "meta", "type": {"type": "map", "values": "bytes"}},
+ {"name": "sync", "type": {"type": "fixed", "name": "Sync", "size": 16}}
+ ]
+}`
+
+var objHeaderSchema = MustParseSchema(objHeaderSchemaRaw)
+
+const (
+ version byte = 1
+ syncSize = 16
+ schemaKey = "avro.schema"
+ codecKey = "avro.codec"
+)
+
+var magic = []byte{'O', 'b', 'j', version}
+
+// DataFileReader is a reader for Avro Object Container Files.
+// More here: https://avro.apache.org/docs/current/spec.html#Object+Container+Files
+type DataFileReader struct {
+ data []byte
+ header *objFileHeader
+ block *DataBlock
+ dec Decoder
+ blockDecoder Decoder
+ datum DatumReader
+}
+
+// The header for object container files
+type objFileHeader struct {
+ Magic []byte `avro:"magic"`
+ Meta map[string][]byte `avro:"meta"`
+ Sync []byte `avro:"sync"`
+}
+
+func readObjFileHeader(dec *BinaryDecoder) (*objFileHeader, error) {
+ reader := NewSpecificDatumReader()
+ reader.SetSchema(objHeaderSchema)
+ header := &objFileHeader{}
+ err := reader.Read(header, dec)
+ return header, err
+}
+
+// NewDataFileReader creates a new DataFileReader for a given file, using the given DatumReader to read the data from that file.
+// May return an error if the file contains invalid data or is just missing.
+func NewDataFileReader(filename string, datumReader DatumReader) (*DataFileReader, error) {
+ buf, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ return newDataFileReaderBytes(buf, datumReader)
+}
+
+// separated out mainly for testing at the moment; will be refactored later to use the io.Reader paradigm
+func newDataFileReaderBytes(buf []byte, datumReader DatumReader) (reader *DataFileReader, err error) {
+ if len(buf) < len(magic) || !bytes.Equal(magic, buf[0:4]) {
+ return nil, NotAvroFile
+ }
+
+ dec := NewBinaryDecoder(buf)
+ blockDecoder := NewBinaryDecoder(nil)
+ reader = &DataFileReader{
+ data: buf,
+ dec: dec,
+ blockDecoder: blockDecoder,
+ datum: datumReader,
+ }
+
+ if reader.header, err = readObjFileHeader(dec); err != nil {
+ return nil, err
+ }
+
+ schema, err := ParseSchema(string(reader.header.Meta[schemaKey]))
+ if err != nil {
+ return nil, err
+ }
+ reader.datum.SetSchema(schema)
+ reader.block = &DataBlock{}
+
+ if reader.hasNextBlock() {
+ if err := reader.NextBlock(); err != nil {
+ return nil, err
+ }
+ }
+
+ return reader, nil
+}
+
+// Seek switches the reading position in this DataFileReader to a provided value.
+func (reader *DataFileReader) Seek(pos int64) {
+ reader.dec.Seek(pos)
+}
+
+func (reader *DataFileReader) hasNext() (bool, error) {
+ if reader.block.BlockRemaining == 0 {
+ if int64(reader.block.BlockSize) != reader.blockDecoder.Tell() {
+ return false, BlockNotFinished
+ }
+ if reader.hasNextBlock() {
+ if err := reader.NextBlock(); err != nil {
+ return false, err
+ }
+ } else {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+func (reader *DataFileReader) hasNextBlock() bool {
+ return int64(len(reader.data)) > reader.dec.Tell()
+}
+
+// Next reads the next value from the file and fills the given value with data.
+// The first return value indicates whether the read was successful.
+// The second return value holds any error that occurred while reading data.
+// Returns (false, nil) when there is no more data left to read.
+func (reader *DataFileReader) Next(v interface{}) (bool, error) {
+ hasNext, err := reader.hasNext()
+ if err != nil {
+ return false, err
+ }
+
+ if hasNext {
+ err := reader.datum.Read(v, reader.blockDecoder)
+ if err != nil {
+ return false, err
+ }
+ reader.block.BlockRemaining--
+ return true, nil
+ }
+
+ return false, nil
+}
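+
+// A minimal usage sketch for reading a container file (illustrative only; the
+// file name "events.avro" and the Event struct with `avro:"..."` tags are
+// assumptions, not part of this package):
+//
+//	reader, err := NewDataFileReader("events.avro", NewSpecificDatumReader())
+//	if err != nil {
+//		panic(err)
+//	}
+//	for {
+//		event := new(Event)
+//		ok, err := reader.Next(event)
+//		if !ok || err != nil {
+//			break
+//		}
+//		// use event here
+//	}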
+
+// NextBlock tells this DataFileReader to skip the current block and move to the next one.
+// May return an error if the block is malformed or there are no more blocks left to read.
+func (reader *DataFileReader) NextBlock() error {
+ blockCount, err := reader.dec.ReadLong()
+ if err != nil {
+ return err
+ }
+
+ blockSize, err := reader.dec.ReadLong()
+ if err != nil {
+ return err
+ }
+
+ if blockSize > math.MaxInt32 || blockSize < 0 {
+ return fmt.Errorf("Block size invalid or too large: %d", blockSize)
+ }
+
+ block := reader.block
+ if block.Data == nil || int64(len(block.Data)) < blockSize {
+ block.Data = make([]byte, blockSize)
+ }
+ block.BlockRemaining = blockCount
+ block.NumEntries = blockCount
+ block.BlockSize = int(blockSize)
+ err = reader.dec.ReadFixedWithBounds(block.Data, 0, int(block.BlockSize))
+ if err != nil {
+ return err
+ }
+ syncBuffer := make([]byte, syncSize)
+ err = reader.dec.ReadFixed(syncBuffer)
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(syncBuffer, reader.header.Sync) {
+ return InvalidSync
+ }
+ reader.blockDecoder.SetBlock(reader.block)
+
+ return nil
+}
+
+////////// DATA FILE WRITER
+
+// DataFileWriter lets you write object container files.
+type DataFileWriter struct {
+ output io.Writer
+ outputEnc *BinaryEncoder
+ datumWriter DatumWriter
+ sync []byte
+
+ // current block is buffered until flush
+ blockBuf *bytes.Buffer
+ blockCount int64
+ blockEnc *BinaryEncoder
+}
+
+// NewDataFileWriter creates a new DataFileWriter for given output and schema using the given DatumWriter to write the data to that Writer.
+// May return an error if writing fails.
+func NewDataFileWriter(output io.Writer, schema Schema, datumWriter DatumWriter) (writer *DataFileWriter, err error) {
+ encoder := NewBinaryEncoder(output)
+ datumWriter.SetSchema(schema)
+ sync := []byte("1234567890abcdef") // TODO come up with other sync value
+
+ header := &objFileHeader{
+ Magic: magic,
+ Meta: map[string][]byte{
+ schemaKey: []byte(schema.String()),
+ codecKey: []byte("null"),
+ },
+ Sync: sync,
+ }
+ headerWriter := NewSpecificDatumWriter()
+ headerWriter.SetSchema(objHeaderSchema)
+ if err = headerWriter.Write(header, encoder); err != nil {
+ return
+ }
+ blockBuf := &bytes.Buffer{}
+ writer = &DataFileWriter{
+ output: output,
+ outputEnc: encoder,
+ datumWriter: datumWriter,
+ sync: sync,
+ blockBuf: blockBuf,
+ blockEnc: NewBinaryEncoder(blockBuf),
+ }
+
+ return
+}
+
+// Write out a single datum.
+//
+// Encoded datums are buffered internally and will not be written to the
+// underlying io.Writer until Flush() is called.
+func (w *DataFileWriter) Write(v interface{}) error {
+ w.blockCount++
+ err := w.datumWriter.Write(v, w.blockEnc)
+ return err
+}
+
+// Flush out any previously written datums to our underlying io.Writer.
+// Does nothing if no datums had previously been written.
+//
+// It's up to the library user to decide how often to flush; doing it
+// often will spend a lot of time on tiny I/O but save memory.
+func (w *DataFileWriter) Flush() error {
+ if w.blockCount > 0 {
+ return w.actuallyFlush()
+ }
+ return nil
+}
+
+func (w *DataFileWriter) actuallyFlush() error {
+ // Write the block count and length directly to output
+ w.outputEnc.WriteLong(w.blockCount)
+ w.outputEnc.WriteLong(int64(w.blockBuf.Len()))
+
+ // copy the buffer which is the block buf to output
+ _, err := io.Copy(w.output, w.blockBuf)
+ if err != nil {
+ return err
+ }
+
+ // write the sync bytes
+ _, err = w.output.Write(w.sync)
+ if err != nil {
+ return err
+ }
+
+ w.blockBuf.Reset() // allow blockbuf's internal memory to be reused
+ w.blockCount = 0
+ return nil
+}
+
+// Close this DataFileWriter.
+// This is required to finish out the data file format.
+// After Close() is called, this DataFileWriter cannot be used anymore.
+func (w *DataFileWriter) Close() error {
+ err := w.Flush() // flush anything remaining
+ if err == nil {
+ // Do an empty flush to signal end of data file format
+ err = w.actuallyFlush()
+
+ if err == nil {
+ // Clean up references.
+ w.output, w.outputEnc, w.datumWriter = nil, nil, nil
+ w.blockBuf, w.blockEnc = nil, nil
+ }
+ }
+ return err
+}
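+
+// A minimal usage sketch for writing a container file (illustrative only; the
+// Event type, its schema JSON and the output file are assumptions):
+//
+//	schema := MustParseSchema(eventSchemaJSON)
+//	f, err := os.Create("events.avro")
+//	if err != nil {
+//		panic(err)
+//	}
+//	w, err := NewDataFileWriter(f, schema, NewSpecificDatumWriter())
+//	if err != nil {
+//		panic(err)
+//	}
+//	if err := w.Write(&Event{Name: "created"}); err != nil {
+//		panic(err)
+//	}
+//	if err := w.Close(); err != nil { // Close flushes any buffered datums
+//		panic(err)
+//	}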
diff --git a/vendor/github.com/go-avro/avro/datum_reader.go b/vendor/github.com/go-avro/avro/datum_reader.go
new file mode 100644
index 00000000..1f4ba250
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/datum_reader.go
@@ -0,0 +1,605 @@
+package avro
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// ***********************
+// NOTICE this file was changed beginning in November 2016 by the team maintaining
+// https://github.com/go-avro/avro. This notice is required to be here due to the
+// terms of the Apache license, see LICENSE for details.
+// ***********************
+
+// Reader is an interface that may be implemented to avoid using runtime reflection during deserialization.
+// Implementing it is optional and may be used as an optimization. Falls back to using reflection if not implemented.
+type Reader interface {
+ Read(dec Decoder) error
+}
+
+// DatumReader is an interface that is responsible for reading structured data according to schema from a decoder
+type DatumReader interface {
+ // Reads a single structured entry using this DatumReader according to provided Schema.
+ // Accepts a value to fill with data and a Decoder to read from. Given value MUST be of pointer type.
+ // May return an error indicating a read failure.
+ Read(interface{}, Decoder) error
+
+ // Sets the schema for this DatumReader to know the data structure.
+ // Note that it must be called before calling Read.
+ SetSchema(Schema)
+}
+
+var enumSymbolsToIndexCache = make(map[string]map[string]int32)
+var enumSymbolsToIndexCacheLock sync.Mutex
+
+// GenericEnum is a generic Avro enum representation. This is still subject to change and may be rethought.
+type GenericEnum struct {
+ // Avro enum symbols.
+ Symbols []string
+ symbolsToIndex map[string]int32
+ index int32
+}
+
+// NewGenericEnum returns a new GenericEnum that uses provided enum symbols.
+func NewGenericEnum(symbols []string) *GenericEnum {
+ symbolsToIndex := make(map[string]int32)
+ for index, symbol := range symbols {
+ symbolsToIndex[symbol] = int32(index)
+ }
+
+ return &GenericEnum{
+ Symbols: symbols,
+ symbolsToIndex: symbolsToIndex,
+ }
+}
+
+// GetIndex gets the numeric value for this enum.
+func (enum *GenericEnum) GetIndex() int32 {
+ return enum.index
+}
+
+// Get gets the string value (i.e. the symbol) for this enum.
+func (enum *GenericEnum) Get() string {
+ return enum.Symbols[enum.index]
+}
+
+// SetIndex sets the numeric value for this enum.
+func (enum *GenericEnum) SetIndex(index int32) {
+ enum.index = index
+}
+
+// Set sets the string value (i.e. the symbol) for this enum.
+// Panics if the given symbol does not exist in this enum.
+func (enum *GenericEnum) Set(symbol string) {
+ if index, exists := enum.symbolsToIndex[symbol]; !exists {
+ panic("Unknown enum symbol")
+ } else {
+ enum.index = index
+ }
+}
+
+// SpecificDatumReader implements DatumReader and is used for filling Go structs with data.
+// Each value passed to Read is expected to be a pointer.
+type SpecificDatumReader struct {
+ sDatumReader
+ schema Schema
+}
+
+// NewSpecificDatumReader creates a new SpecificDatumReader.
+func NewSpecificDatumReader() *SpecificDatumReader {
+ return &SpecificDatumReader{}
+}
+
+// SetSchema sets the schema for this SpecificDatumReader to know the data structure.
+// Note that it must be called before calling Read.
+func (reader *SpecificDatumReader) SetSchema(schema Schema) {
+ reader.schema = schema
+}
+
+// Read reads a single structured entry using this SpecificDatumReader.
+// Accepts a Go struct with exported fields to fill with data and a Decoder to read from. Given value MUST be of
+// pointer type. Field names should match field names in Avro schema but be exported (e.g. "some_value" in Avro
+// schema is expected to be Some_value in struct) or you may provide Go struct tags to explicitly show how
+// to map fields (e.g. if you want to map "some_value" field of type int to SomeValue in Go struct you should define
+// your struct field as follows: SomeValue int32 `avro:"some_field"`).
+// May return an error indicating a read failure.
+func (reader *SpecificDatumReader) Read(v interface{}, dec Decoder) error {
+ if reader, ok := v.(Reader); ok {
+ return reader.Read(dec)
+ }
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return errors.New("Not applicable for non-pointer types or nil")
+ }
+ if reader.schema == nil {
+ return SchemaNotSet
+ }
+ return reader.fillRecord(reader.schema, rv, dec)
+}
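+
+// For example (a sketch; the schema, the encoded bytes and the MyRecord struct
+// are assumptions), a record with an int "some_value" field can be decoded like this:
+//
+//	type MyRecord struct {
+//		SomeValue int32 `avro:"some_value"`
+//	}
+//
+//	reader := NewSpecificDatumReader()
+//	reader.SetSchema(schema)
+//	rec := new(MyRecord)
+//	err := reader.Read(rec, NewBinaryDecoder(encodedBytes))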
+
+// It turns out that a SpecificDatumReader instance is not needed once the actual
+// decoding has started. At first this looks like it merely avoids passing a pointer
+// around, but it means more than that: because the decoding functions no longer need
+// access to the instance, they can be memoized more easily and cheaply.
+type sDatumReader struct{}
+
+func (reader sDatumReader) findAndSet(v reflect.Value, field *SchemaField, dec Decoder) error {
+ structField, err := findField(v, field.Name)
+ if err != nil {
+ return err
+ }
+
+ value, err := reader.readValue(field.Type, structField, dec)
+ if err != nil {
+ return err
+ }
+
+ reader.setValue(field, structField, value)
+
+ return nil
+}
+
+func (reader sDatumReader) readValue(field Schema, reflectField reflect.Value, dec Decoder) (reflect.Value, error) {
+ switch field.Type() {
+ case Null:
+ return reflect.ValueOf(nil), nil
+ case Boolean:
+ return reader.mapPrimitive(func() (interface{}, error) { return dec.ReadBoolean() })
+ case Int:
+ return reader.mapPrimitive(func() (interface{}, error) { return dec.ReadInt() })
+ case Long:
+ return reader.mapPrimitive(func() (interface{}, error) { return dec.ReadLong() })
+ case Float:
+ return reader.mapPrimitive(func() (interface{}, error) { return dec.ReadFloat() })
+ case Double:
+ return reader.mapPrimitive(func() (interface{}, error) { return dec.ReadDouble() })
+ case Bytes:
+ return reader.mapPrimitive(func() (interface{}, error) { return dec.ReadBytes() })
+ case String:
+ return reader.mapPrimitive(func() (interface{}, error) { return dec.ReadString() })
+ case Array:
+ return reader.mapArray(field, reflectField, dec)
+ case Enum:
+ return reader.mapEnum(field, dec)
+ case Map:
+ return reader.mapMap(field, reflectField, dec)
+ case Union:
+ return reader.mapUnion(field, reflectField, dec)
+ case Fixed:
+ return reader.mapFixed(field, dec)
+ case Record:
+ return reader.mapRecord(field, reflectField, dec)
+ case Recursive:
+ return reader.mapRecord(field.(*RecursiveSchema).Actual, reflectField, dec)
+ }
+
+ return reflect.ValueOf(nil), fmt.Errorf("Unknown field type: %d", field.Type())
+}
+
+func (reader sDatumReader) setValue(field *SchemaField, where reflect.Value, what reflect.Value) {
+ zero := reflect.Value{}
+ if zero != what {
+ where.Set(what)
+ }
+}
+
+func (reader sDatumReader) mapPrimitive(readerFunc func() (interface{}, error)) (reflect.Value, error) {
+ value, err := readerFunc()
+ if err != nil {
+ return reflect.ValueOf(value), err
+ }
+
+ return reflect.ValueOf(value), nil
+}
+
+func (reader sDatumReader) mapArray(field Schema, reflectField reflect.Value, dec Decoder) (reflect.Value, error) {
+ arrayLength, err := dec.ReadArrayStart()
+ if err != nil {
+ return reflect.ValueOf(arrayLength), err
+ }
+
+ array := reflect.MakeSlice(reflectField.Type(), 0, 0)
+ pointer := reflectField.Type().Elem().Kind() == reflect.Ptr
+ for {
+ if arrayLength == 0 {
+ break
+ }
+
+ arrayPart := reflect.MakeSlice(reflectField.Type(), int(arrayLength), int(arrayLength))
+ var i int64
+ for ; i < arrayLength; i++ {
+ current := arrayPart.Index(int(i))
+ val, err := reader.readValue(field.(*ArraySchema).Items, current, dec)
+ if err != nil {
+ return reflect.ValueOf(arrayLength), err
+ }
+
+ // The only time `val` would not be valid is if it's an explicit null value.
+ // Since the default value is the zero value, we can simply just not set the value
+ if val.IsValid() {
+ if pointer && val.Kind() != reflect.Ptr {
+ val = val.Addr()
+ } else if !pointer && val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ current.Set(val)
+ }
+ }
+ //concatenate arrays
+ if array.Len() == 0 {
+ array = arrayPart
+ } else {
+ array = reflect.AppendSlice(array, arrayPart)
+ }
+ arrayLength, err = dec.ArrayNext()
+ if err != nil {
+ return reflect.ValueOf(arrayLength), err
+ }
+ }
+ return array, nil
+}
+
+func (reader sDatumReader) mapMap(field Schema, reflectField reflect.Value, dec Decoder) (reflect.Value, error) {
+ mapLength, err := dec.ReadMapStart()
+ if err != nil {
+ return reflect.ValueOf(mapLength), err
+ }
+ elemType := reflectField.Type().Elem()
+ elemIsPointer := (elemType.Kind() == reflect.Ptr)
+ resultMap := reflect.MakeMap(reflectField.Type())
+
+ // dest is an element type value used as the destination for reading values into.
+ // This is required for using non-primitive types as map values, because map values are not addressable
+ // like array values are. It can be reused because it's scratch space and it's copied into the map.
+ dest := reflect.New(elemType).Elem()
+
+ for {
+ if mapLength == 0 {
+ break
+ }
+
+ var i int64
+ for ; i < mapLength; i++ {
+ key, err := reader.readValue(&StringSchema{}, reflectField, dec)
+ if err != nil {
+ return reflect.ValueOf(mapLength), err
+ }
+ val, err := reader.readValue(field.(*MapSchema).Values, dest, dec)
+ if err != nil {
+				return reflect.ValueOf(mapLength), err
+ }
+ if !elemIsPointer && val.Kind() == reflect.Ptr {
+ resultMap.SetMapIndex(key, val.Elem())
+ } else {
+ resultMap.SetMapIndex(key, val)
+ }
+ }
+
+ mapLength, err = dec.MapNext()
+ if err != nil {
+ return reflect.ValueOf(mapLength), err
+ }
+ }
+ return resultMap, nil
+}
+
+func (reader sDatumReader) mapEnum(field Schema, dec Decoder) (reflect.Value, error) {
+ enumIndex, err := dec.ReadEnum()
+ if err != nil {
+ return reflect.ValueOf(enumIndex), err
+ }
+
+ schema := field.(*EnumSchema)
+ fullName := GetFullName(schema)
+
+ var symbolsToIndex map[string]int32
+ enumSymbolsToIndexCacheLock.Lock()
+ if symbolsToIndex = enumSymbolsToIndexCache[fullName]; symbolsToIndex == nil {
+ symbolsToIndex = NewGenericEnum(schema.Symbols).symbolsToIndex
+ enumSymbolsToIndexCache[fullName] = symbolsToIndex
+ }
+ enumSymbolsToIndexCacheLock.Unlock()
+
+ enum := &GenericEnum{
+ Symbols: schema.Symbols,
+ symbolsToIndex: symbolsToIndex,
+ index: enumIndex,
+ }
+ return reflect.ValueOf(enum), nil
+}
+
+func (reader sDatumReader) mapUnion(field Schema, reflectField reflect.Value, dec Decoder) (reflect.Value, error) {
+ unionType, err := dec.ReadInt()
+ if err != nil {
+ return reflect.ValueOf(unionType), err
+ }
+
+ union := field.(*UnionSchema).Types[unionType]
+ return reader.readValue(union, reflectField, dec)
+}
+
+func (reader sDatumReader) mapFixed(field Schema, dec Decoder) (reflect.Value, error) {
+ fixed := make([]byte, field.(*FixedSchema).Size)
+ if err := dec.ReadFixed(fixed); err != nil {
+ return reflect.ValueOf(fixed), err
+ }
+ return reflect.ValueOf(fixed), nil
+}
+
+func (reader sDatumReader) mapRecord(field Schema, reflectField reflect.Value, dec Decoder) (reflect.Value, error) {
+ var t reflect.Type
+ switch reflectField.Kind() {
+ case reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice, reflect.Chan:
+ t = reflectField.Type().Elem()
+ default:
+ t = reflectField.Type()
+ }
+ record := reflect.New(t)
+ err := reader.fillRecord(field, record, dec)
+ return record, err
+}
+
+func (this sDatumReader) fillRecord(field Schema, record reflect.Value, dec Decoder) error {
+ if pf, ok := field.(*preparedRecordSchema); ok {
+ plan, err := pf.getPlan(record.Type().Elem())
+ if err != nil {
+ return err
+ }
+
+ rf := record.Elem()
+ for i := range plan.decodePlan {
+ entry := &plan.decodePlan[i]
+ structField := rf.FieldByIndex(entry.index)
+ value, err := entry.dec(structField, dec)
+
+ if err != nil {
+ return err
+ }
+ if value.IsValid() {
+ structField.Set(value)
+ }
+ }
+ } else {
+ recordSchema := field.(*RecordSchema)
+ //ri := record.Interface()
+ for i := 0; i < len(recordSchema.Fields); i++ {
+ this.findAndSet(record, recordSchema.Fields[i], dec)
+ }
+ }
+ return nil
+}
+
+// GenericDatumReader implements DatumReader and is used for filling GenericRecords or other Avro supported types
+// (full list is: interface{}, bool, int32, int64, float32, float64, string, slices of any type, maps with string keys
+// and any values, GenericEnums) with data.
+// Each value passed to Read is expected to be a pointer.
+type GenericDatumReader struct {
+ schema Schema
+}
+
+// NewGenericDatumReader creates a new GenericDatumReader.
+func NewGenericDatumReader() *GenericDatumReader {
+ return &GenericDatumReader{}
+}
+
+// SetSchema sets the schema for this GenericDatumReader to know the data structure.
+// Note that it must be called before calling Read.
+func (reader *GenericDatumReader) SetSchema(schema Schema) {
+ reader.schema = schema
+}
+
+// Read reads a single entry using this GenericDatumReader.
+// Accepts a value to fill with data and a Decoder to read from. Given value MUST be of pointer type.
+// May return an error indicating a read failure.
+func (reader *GenericDatumReader) Read(v interface{}, dec Decoder) error {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return errors.New("Not applicable for non-pointer types or nil")
+ }
+ rv = rv.Elem()
+ if reader.schema == nil {
+ return SchemaNotSet
+ }
+
+ //read the value
+ value, err := reader.readValue(reader.schema, dec)
+ if err != nil {
+ return err
+ }
+
+ newValue := reflect.ValueOf(value)
+ // dereference the value if needed
+ if newValue.Kind() == reflect.Ptr {
+ newValue = newValue.Elem()
+ }
+
+ //set the new value
+ rv.Set(newValue)
+
+ return nil
+}
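+
+// A usage sketch (the schema and encoded bytes are assumptions):
+//
+//	reader := NewGenericDatumReader()
+//	reader.SetSchema(schema)
+//	record := NewGenericRecord(schema)
+//	if err := reader.Read(record, NewBinaryDecoder(encodedBytes)); err != nil {
+//		panic(err)
+//	}
+//	value := record.Get("some_value")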
+
+func (reader *GenericDatumReader) findAndSet(record *GenericRecord, field *SchemaField, dec Decoder) error {
+ value, err := reader.readValue(field.Type, dec)
+ if err != nil {
+ return err
+ }
+
+ switch typedValue := value.(type) {
+ case *GenericEnum:
+ if typedValue.GetIndex() >= int32(len(typedValue.Symbols)) {
+ return errors.New("Enum index invalid!")
+ }
+ record.Set(field.Name, typedValue.Symbols[typedValue.GetIndex()])
+
+ default:
+ record.Set(field.Name, value)
+ }
+
+ return nil
+}
+
+func (reader *GenericDatumReader) readValue(field Schema, dec Decoder) (interface{}, error) {
+ switch field.Type() {
+ case Null:
+ return nil, nil
+ case Boolean:
+ return dec.ReadBoolean()
+ case Int:
+ return dec.ReadInt()
+ case Long:
+ return dec.ReadLong()
+ case Float:
+ return dec.ReadFloat()
+ case Double:
+ return dec.ReadDouble()
+ case Bytes:
+ return dec.ReadBytes()
+ case String:
+ return dec.ReadString()
+ case Array:
+ return reader.mapArray(field, dec)
+ case Enum:
+ return reader.mapEnum(field, dec)
+ case Map:
+ return reader.mapMap(field, dec)
+ case Union:
+ return reader.mapUnion(field, dec)
+ case Fixed:
+ return reader.mapFixed(field, dec)
+ case Record:
+ return reader.mapRecord(field, dec)
+ case Recursive:
+ return reader.mapRecord(field.(*RecursiveSchema).Actual, dec)
+ }
+
+ return nil, fmt.Errorf("Unknown field type: %d", field.Type())
+}
+
+func (reader *GenericDatumReader) mapArray(field Schema, dec Decoder) ([]interface{}, error) {
+ arrayLength, err := dec.ReadArrayStart()
+ if err != nil {
+ return nil, err
+ }
+
+ var array []interface{}
+ for {
+ if arrayLength == 0 {
+ break
+ }
+ arrayPart := make([]interface{}, arrayLength, arrayLength)
+ var i int64
+ for ; i < arrayLength; i++ {
+ val, err := reader.readValue(field.(*ArraySchema).Items, dec)
+ if err != nil {
+ return nil, err
+ }
+ arrayPart[i] = val
+ }
+ //concatenate arrays
+ concatArray := make([]interface{}, len(array)+int(arrayLength), cap(array)+int(arrayLength))
+ copy(concatArray, array)
+		copy(concatArray[len(array):], arrayPart)
+ array = concatArray
+ arrayLength, err = dec.ArrayNext()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return array, nil
+}
+
+func (reader *GenericDatumReader) mapEnum(field Schema, dec Decoder) (*GenericEnum, error) {
+ enumIndex, err := dec.ReadEnum()
+ if err != nil {
+ return nil, err
+ }
+
+ schema := field.(*EnumSchema)
+ fullName := GetFullName(schema)
+
+ var symbolsToIndex map[string]int32
+ enumSymbolsToIndexCacheLock.Lock()
+ if symbolsToIndex = enumSymbolsToIndexCache[fullName]; symbolsToIndex == nil {
+ symbolsToIndex = NewGenericEnum(schema.Symbols).symbolsToIndex
+ enumSymbolsToIndexCache[fullName] = symbolsToIndex
+ }
+ enumSymbolsToIndexCacheLock.Unlock()
+
+ enum := &GenericEnum{
+ Symbols: schema.Symbols,
+ symbolsToIndex: symbolsToIndex,
+ index: enumIndex,
+ }
+ return enum, nil
+}
+
+func (reader *GenericDatumReader) mapMap(field Schema, dec Decoder) (map[string]interface{}, error) {
+ mapLength, err := dec.ReadMapStart()
+ if err != nil {
+ return nil, err
+ }
+
+ resultMap := make(map[string]interface{})
+ for {
+ if mapLength == 0 {
+ break
+ }
+ var i int64
+ for ; i < mapLength; i++ {
+ key, err := reader.readValue(&StringSchema{}, dec)
+ if err != nil {
+ return nil, err
+ }
+ val, err := reader.readValue(field.(*MapSchema).Values, dec)
+ if err != nil {
+ return nil, err
+ }
+ resultMap[key.(string)] = val
+ }
+
+ mapLength, err = dec.MapNext()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return resultMap, nil
+}
+
+func (reader *GenericDatumReader) mapUnion(field Schema, dec Decoder) (interface{}, error) {
+ unionType, err := dec.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ if unionType >= 0 && unionType < int32(len(field.(*UnionSchema).Types)) {
+ union := field.(*UnionSchema).Types[unionType]
+ return reader.readValue(union, dec)
+ }
+
+ return nil, UnionTypeOverflow
+}
+
+func (reader *GenericDatumReader) mapFixed(field Schema, dec Decoder) ([]byte, error) {
+ fixed := make([]byte, field.(*FixedSchema).Size)
+ if err := dec.ReadFixed(fixed); err != nil {
+ return nil, err
+ }
+ return fixed, nil
+}
+
+func (reader *GenericDatumReader) mapRecord(field Schema, dec Decoder) (*GenericRecord, error) {
+ record := NewGenericRecord(field)
+
+ recordSchema := assertRecordSchema(field)
+ for i := 0; i < len(recordSchema.Fields); i++ {
+ err := reader.findAndSet(record, recordSchema.Fields[i], dec)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return record, nil
+}
diff --git a/vendor/github.com/go-avro/avro/datum_utils.go b/vendor/github.com/go-avro/avro/datum_utils.go
new file mode 100644
index 00000000..4c3394d7
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/datum_utils.go
@@ -0,0 +1,101 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+package avro
+
+import (
+ "reflect"
+ "strings"
+ "sync"
+)
+
+func findField(where reflect.Value, name string) (reflect.Value, error) {
+ if where.Kind() == reflect.Ptr {
+ where = where.Elem()
+ }
+ rm := reflectEnsureRi(where.Type())
+ if rf, ok := rm.names[name]; ok {
+ return where.FieldByIndex(rf), nil
+ }
+ return reflect.Value{}, NewFieldDoesNotExistError(name)
+}
+
+func reflectEnsureRi(t reflect.Type) *reflectInfo {
+ reflectMapLock.RLock()
+ rm := reflectMap[t]
+ reflectMapLock.RUnlock()
+ if rm == nil {
+ rm = reflectBuildRi(t)
+ }
+ return rm
+}
+
+func reflectBuildRi(t reflect.Type) *reflectInfo {
+ rm := &reflectInfo{
+ names: make(map[string][]int),
+ }
+ rm.fill(t, nil)
+
+ reflectMapLock.Lock()
+ reflectMap[t] = rm
+ reflectMapLock.Unlock()
+ return rm
+}
+
+var reflectMap = make(map[reflect.Type]*reflectInfo)
+var reflectMapLock sync.RWMutex
+
+type reflectInfo struct {
+ names map[string][]int
+}
+
+// fill the given reflect info with the field names mapped.
+//
+// fill will recurse into anonymous structs incrementing the index prefix
+// so that untagged anonymous structs can be used as the source of truth.
+func (rm *reflectInfo) fill(t reflect.Type, indexPrefix []int) {
+ // simple infinite recursion preventer: stop when we are >10 deep.
+ if len(indexPrefix) > 10 {
+ return
+ }
+
+ fillName := func(tag string, idx []int) {
+ if _, ok := rm.names[tag]; !ok {
+ rm.names[tag] = idx
+ }
+ }
+ // these are anonymous structs to investigate (tail recursion)
+ var toInvestigate [][]int
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ tag := f.Tag.Get("avro")
+ idx := append(append([]int{}, indexPrefix...), f.Index...)
+
+ if f.Anonymous && tag == "" && f.Type.Kind() == reflect.Struct {
+ toInvestigate = append(toInvestigate, idx)
+ } else if strings.ToLower(f.Name[:1]) != f.Name[:1] {
+ if tag != "" {
+ fillName(tag, idx)
+ } else {
+ fillName(f.Name, idx)
+ fillName(strings.ToLower(f.Name[:1])+f.Name[1:], idx)
+ }
+ }
+ }
+ for _, idx := range toInvestigate {
+ // recurse into anonymous structs now that we handled the base ones.
+ rm.fill(t.Field(idx[len(idx)-1]).Type, idx)
+ }
+}
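+
+// For illustration (the Event struct is an assumption): given
+//
+//	type Event struct {
+//		SomeValue int32 `avro:"some_value"`
+//		Name      string
+//	}
+//
+// findField resolves "some_value" through the struct tag, and resolves both
+// "Name" and "name" to the untagged exported field.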
diff --git a/vendor/github.com/go-avro/avro/datum_writer.go b/vendor/github.com/go-avro/avro/datum_writer.go
new file mode 100644
index 00000000..43e4f20f
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/datum_writer.go
@@ -0,0 +1,576 @@
+package avro
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// ***********************
+// NOTICE this file was changed beginning in November 2016 by the team maintaining
+// https://github.com/go-avro/avro. This notice is required to be here due to the
+// terms of the Apache license, see LICENSE for details.
+// ***********************
+
+// Writer is an interface that may be implemented to avoid using runtime reflection during serialization.
+// Implementing it is optional and may be used as an optimization. Falls back to using reflection if not implemented.
+type Writer interface {
+ Write(enc Encoder) error
+}
+
+// DatumWriter is an interface that is responsible for writing structured data according to schema to an encoder.
+type DatumWriter interface {
+ // Write writes a single entry using this DatumWriter according to provided Schema.
+ // Accepts a value to write and Encoder to write to.
+ // May return an error indicating a write failure.
+ Write(interface{}, Encoder) error
+
+ // Sets the schema for this DatumWriter to know the data structure.
+ // Note that it must be called before calling Write.
+ SetSchema(Schema)
+}
+
+// SpecificDatumWriter implements DatumWriter and is used for writing Go structs in Avro format.
+type SpecificDatumWriter struct {
+ schema Schema
+}
+
+// NewSpecificDatumWriter creates a new SpecificDatumWriter.
+func NewSpecificDatumWriter() *SpecificDatumWriter {
+ return &SpecificDatumWriter{}
+}
+
+// SetSchema sets the provided schema for this SpecificDatumWriter to know the data structure.
+// Note that it must be called before calling Write.
+func (writer *SpecificDatumWriter) SetSchema(schema Schema) {
+ writer.schema = schema
+}
+
+// Write writes a single Go struct using this SpecificDatumWriter according to provided Schema.
+// Accepts a value to write and Encoder to write to. Field names should match field names in Avro schema but be exported
+// (e.g. "some_value" in Avro schema is expected to be Some_value in struct) or you may provide Go struct tags to
+// explicitly show how to map fields (e.g. if you want to map "some_value" field of type int to SomeValue in Go struct
+// you should define your struct field as follows: SomeValue int32 `avro:"some_field"`).
+// May return an error indicating a write failure.
+func (writer *SpecificDatumWriter) Write(obj interface{}, enc Encoder) error {
+ if writer, ok := obj.(Writer); ok {
+ return writer.Write(enc)
+ }
+
+ rv := reflect.ValueOf(obj)
+
+ if writer.schema == nil {
+ return SchemaNotSet
+ }
+
+ return writer.write(rv, enc, writer.schema)
+}
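+
+// A usage sketch (the schema, the MyRecord struct and the destination buffer
+// are assumptions):
+//
+//	writer := NewSpecificDatumWriter()
+//	writer.SetSchema(schema)
+//	var buf bytes.Buffer
+//	err := writer.Write(&MyRecord{SomeValue: int32(42)}, NewBinaryEncoder(&buf))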
+
+func (writer *SpecificDatumWriter) write(v reflect.Value, enc Encoder, s Schema) error {
+ switch s.Type() {
+ case Null:
+ case Boolean:
+ return writer.writeBoolean(v, enc, s)
+ case Int:
+ return writer.writeInt(v, enc, s)
+ case Long:
+ return writer.writeLong(v, enc, s)
+ case Float:
+ return writer.writeFloat(v, enc, s)
+ case Double:
+ return writer.writeDouble(v, enc, s)
+ case Bytes:
+ return writer.writeBytes(v, enc, s)
+ case String:
+ return writer.writeString(v, enc, s)
+ case Array:
+ return writer.writeArray(v, enc, s)
+ case Map:
+ return writer.writeMap(v, enc, s)
+ case Enum:
+ return writer.writeEnum(v, enc, s)
+ case Union:
+ return writer.writeUnion(v, enc, s)
+ case Fixed:
+ return writer.writeFixed(v, enc, s)
+ case Record:
+ return writer.writeRecord(v, enc, s)
+ case Recursive:
+ return writer.writeRecord(v, enc, s.(*RecursiveSchema).Actual)
+ }
+
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeBoolean(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid boolean value: %v", v.Interface())
+ }
+
+ enc.WriteBoolean(v.Interface().(bool))
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeInt(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid int value: %v", v.Interface())
+ }
+
+ enc.WriteInt(v.Interface().(int32))
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeLong(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid long value: %v", v.Interface())
+ }
+
+ enc.WriteLong(v.Interface().(int64))
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeFloat(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid float value: %v", v.Interface())
+ }
+
+ enc.WriteFloat(v.Interface().(float32))
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeDouble(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid double value: %v", v.Interface())
+ }
+
+ enc.WriteDouble(v.Interface().(float64))
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeBytes(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid bytes value: %v", v.Interface())
+ }
+
+ enc.WriteBytes(v.Interface().([]byte))
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeString(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid string value: %v", v.Interface())
+ }
+
+ enc.WriteString(v.Interface().(string))
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeArray(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid array value: %v", v.Interface())
+ }
+
+ if v.Len() == 0 {
+ enc.WriteArrayNext(0)
+ return nil
+ }
+
+ //TODO should probably write blocks of some length
+ enc.WriteArrayStart(int64(v.Len()))
+ for i := 0; i < v.Len(); i++ {
+ if err := writer.write(v.Index(i), enc, s.(*ArraySchema).Items); err != nil {
+ return err
+ }
+ }
+ enc.WriteArrayNext(0)
+
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeMap(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid map value: %v", v.Interface())
+ }
+
+ if v.Len() == 0 {
+ enc.WriteMapNext(0)
+ return nil
+ }
+ //TODO should probably write blocks of some length
+ enc.WriteMapStart(int64(v.Len()))
+ for _, key := range v.MapKeys() {
+ err := writer.writeString(key, enc, &StringSchema{})
+ if err != nil {
+ return err
+ }
+ if err = writer.write(v.MapIndex(key), enc, s.(*MapSchema).Values); err != nil {
+ return err
+ }
+ }
+ enc.WriteMapNext(0)
+
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeEnum(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid enum value: %v", v.Interface())
+ }
+
+ enc.WriteInt(v.Interface().(*GenericEnum).GetIndex())
+
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeUnion(v reflect.Value, enc Encoder, s Schema) error {
+ unionSchema := s.(*UnionSchema)
+ index := unionSchema.GetType(v)
+
+ if unionSchema.Types == nil || index < 0 || index >= len(unionSchema.Types) {
+ return fmt.Errorf("Invalid union value: %v", v.Interface())
+ }
+
+ enc.WriteLong(int64(index))
+ return writer.write(v, enc, unionSchema.Types[index])
+}
+
+func (writer *SpecificDatumWriter) writeFixed(v reflect.Value, enc Encoder, s Schema) error {
+ fs := s.(*FixedSchema)
+
+ if !fs.Validate(v) {
+ return fmt.Errorf("Invalid fixed value: %v", v.Interface())
+ }
+
+ // Write the raw bytes. The length is known by the schema
+ enc.WriteRaw(v.Interface().([]byte))
+ return nil
+}
+
+func (writer *SpecificDatumWriter) writeRecord(v reflect.Value, enc Encoder, s Schema) error {
+ if !s.Validate(v) {
+ return fmt.Errorf("Invalid record value: %v", v.Interface())
+ }
+
+ rs := assertRecordSchema(s)
+ for i := range rs.Fields {
+ schemaField := rs.Fields[i]
+ field, err := findField(v, schemaField.Name)
+ if err != nil {
+ return err
+ }
+ if err := writer.write(field, enc, schemaField.Type); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GenericDatumWriter implements DatumWriter and is used for writing GenericRecords or other Avro supported types
+// (full list is: interface{}, bool, int32, int64, float32, float64, string, slices of any type, maps with string keys
+// and any values, GenericEnums) to a given Encoder.
+type GenericDatumWriter struct {
+ schema Schema
+}
+
+// NewGenericDatumWriter creates a new GenericDatumWriter.
+func NewGenericDatumWriter() *GenericDatumWriter {
+ return &GenericDatumWriter{}
+}
+
+// SetSchema sets the provided schema for this GenericDatumWriter to know the data structure.
+// Note that it must be called before calling Write.
+func (writer *GenericDatumWriter) SetSchema(schema Schema) {
+ writer.schema = schema
+}
+
+// Write writes a single entry using this GenericDatumWriter according to provided Schema.
+// Accepts a value to write and Encoder to write to.
+// May return an error indicating a write failure.
+func (writer *GenericDatumWriter) Write(obj interface{}, enc Encoder) error {
+ return writer.write(obj, enc, writer.schema)
+}
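+
+// A usage sketch (the schema and record contents are assumptions):
+//
+//	writer := NewGenericDatumWriter()
+//	writer.SetSchema(schema)
+//	record := NewGenericRecord(schema)
+//	record.Set("some_value", int32(42))
+//	var buf bytes.Buffer
+//	err := writer.Write(record, NewBinaryEncoder(&buf))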
+
+func (writer *GenericDatumWriter) write(v interface{}, enc Encoder, s Schema) error {
+ switch s.Type() {
+ case Null:
+ case Boolean:
+ return writer.writeBoolean(v, enc)
+ case Int:
+ return writer.writeInt(v, enc)
+ case Long:
+ return writer.writeLong(v, enc)
+ case Float:
+ return writer.writeFloat(v, enc)
+ case Double:
+ return writer.writeDouble(v, enc)
+ case Bytes:
+ return writer.writeBytes(v, enc)
+ case String:
+ return writer.writeString(v, enc)
+ case Array:
+ return writer.writeArray(v, enc, s)
+ case Map:
+ return writer.writeMap(v, enc, s)
+ case Enum:
+ return writer.writeEnum(v, enc, s)
+ case Union:
+ return writer.writeUnion(v, enc, s)
+ case Fixed:
+ return writer.writeFixed(v, enc, s)
+ case Record:
+ return writer.writeRecord(v, enc, s)
+ case Recursive:
+ return writer.writeRecord(v, enc, s.(*RecursiveSchema).Actual)
+ }
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeBoolean(v interface{}, enc Encoder) error {
+ switch value := v.(type) {
+ case bool:
+ enc.WriteBoolean(value)
+ default:
+ return fmt.Errorf("%v is not a boolean", v)
+ }
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeInt(v interface{}, enc Encoder) error {
+ switch value := v.(type) {
+ case int32:
+ enc.WriteInt(value)
+ default:
+ return fmt.Errorf("%v is not an int32", v)
+ }
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeLong(v interface{}, enc Encoder) error {
+ switch value := v.(type) {
+ case int64:
+ enc.WriteLong(value)
+ default:
+ return fmt.Errorf("%v is not an int64", v)
+ }
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeFloat(v interface{}, enc Encoder) error {
+ switch value := v.(type) {
+ case float32:
+ enc.WriteFloat(value)
+ default:
+ return fmt.Errorf("%v is not a float32", v)
+ }
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeDouble(v interface{}, enc Encoder) error {
+ switch value := v.(type) {
+ case float64:
+ enc.WriteDouble(value)
+ default:
+ return fmt.Errorf("%v is not a float64", v)
+ }
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeBytes(v interface{}, enc Encoder) error {
+ switch value := v.(type) {
+ case []byte:
+ enc.WriteBytes(value)
+ default:
+ return fmt.Errorf("%v is not a []byte", v)
+ }
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeString(v interface{}, enc Encoder) error {
+ switch value := v.(type) {
+ case string:
+ enc.WriteString(value)
+ default:
+ return fmt.Errorf("%v is not a string", v)
+ }
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeArray(v interface{}, enc Encoder, s Schema) error {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Slice && rv.Kind() != reflect.Array {
+ return errors.New("Not a slice or array type")
+ }
+
+ if rv.Len() == 0 {
+ enc.WriteArrayNext(0)
+ return nil
+ }
+
+ //TODO should probably write blocks of some length
+ enc.WriteArrayStart(int64(rv.Len()))
+ for i := 0; i < rv.Len(); i++ {
+ err := writer.write(rv.Index(i).Interface(), enc, s.(*ArraySchema).Items)
+ if err != nil {
+ return err
+ }
+ }
+ enc.WriteArrayNext(0)
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeMap(v interface{}, enc Encoder, s Schema) error {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Map {
+ return errors.New("Not a map type")
+ }
+
+ if rv.Len() == 0 {
+ enc.WriteMapNext(0)
+ return nil
+ }
+
+ //TODO should probably write blocks of some length
+ enc.WriteMapStart(int64(rv.Len()))
+ for _, key := range rv.MapKeys() {
+ err := writer.writeString(key.Interface(), enc)
+ if err != nil {
+ return err
+ }
+ err = writer.write(rv.MapIndex(key).Interface(), enc, s.(*MapSchema).Values)
+ if err != nil {
+ return err
+ }
+ }
+ enc.WriteMapNext(0)
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeEnum(v interface{}, enc Encoder, s Schema) error {
+ switch v.(type) {
+ case *GenericEnum:
+ {
+ rs := s.(*EnumSchema)
+			enum := v.(*GenericEnum)
+			for i := range rs.Symbols {
+				if enum.Get() == rs.Symbols[i] {
+					enc.WriteInt(int32(i))
+					break
+				}
+			}
+ }
+ case string:
+ {
+ rs := s.(*EnumSchema)
+ for i := range rs.Symbols {
+ if v.(string) == rs.Symbols[i] {
+ enc.WriteInt(int32(i))
+ break
+ }
+ }
+ }
+ default:
+ return fmt.Errorf("%v is not a *GenericEnum", v)
+ }
+
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeUnion(v interface{}, enc Encoder, s Schema) error {
+ unionSchema := s.(*UnionSchema)
+
+ index := unionSchema.GetType(reflect.ValueOf(v))
+ if index != -1 {
+ enc.WriteInt(int32(index))
+ return writer.write(v, enc, unionSchema.Types[index])
+ }
+
+ return fmt.Errorf("Could not write %v as %s", v, s)
+}
+
+func (writer *GenericDatumWriter) isWritableAs(v interface{}, s Schema) bool {
+ ok := false
+ switch s.(type) {
+ case *NullSchema:
+ return v == nil
+ case *BooleanSchema:
+ _, ok = v.(bool)
+ case *IntSchema:
+ _, ok = v.(int32)
+ case *LongSchema:
+ _, ok = v.(int64)
+ case *FloatSchema:
+ _, ok = v.(float32)
+ case *DoubleSchema:
+ _, ok = v.(float64)
+ case *StringSchema:
+ _, ok = v.(string)
+ case *BytesSchema:
+ _, ok = v.([]byte)
+ case *ArraySchema:
+ {
+ kind := reflect.ValueOf(v).Kind()
+ return kind == reflect.Array || kind == reflect.Slice
+ }
+ case *MapSchema:
+ return reflect.ValueOf(v).Kind() == reflect.Map
+ case *EnumSchema:
+ _, ok = v.(*GenericEnum)
+ case *UnionSchema:
+ panic("Nested unions not supported") //this is a part of spec: http://avro.apache.org/docs/current/spec.html#binary_encode_complex
+ case *RecordSchema:
+ _, ok = v.(*GenericRecord)
+ case *preparedRecordSchema:
+ _, ok = v.(*GenericRecord)
+ }
+
+ return ok
+}
+
+func (writer *GenericDatumWriter) writeFixed(v interface{}, enc Encoder, s Schema) error {
+ fs := s.(*FixedSchema)
+
+ if !fs.Validate(reflect.ValueOf(v)) {
+ return fmt.Errorf("Invalid fixed value: %v", v)
+ }
+
+ // Write the raw bytes. The length is known by the schema
+ enc.WriteRaw(v.([]byte))
+ return nil
+}
+
+func (writer *GenericDatumWriter) writeRecord(v interface{}, enc Encoder, s Schema) error {
+ switch value := v.(type) {
+ case *GenericRecord:
+ {
+ rs := assertRecordSchema(s)
+ for i := range rs.Fields {
+ schemaField := rs.Fields[i]
+ field := value.Get(schemaField.Name)
+ if field == nil {
+ field = schemaField.Default
+ }
+ err := writer.write(field, enc, schemaField.Type)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ default:
+ return fmt.Errorf("%v is not a *GenericRecord", v)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-avro/avro/decoder.go b/vendor/github.com/go-avro/avro/decoder.go
new file mode 100644
index 00000000..df12c497
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/decoder.go
@@ -0,0 +1,337 @@
+package avro
+
+import (
+ "encoding/binary"
+ "math"
+)
+
+// Decoder is an interface that provides low-level support for deserializing Avro values.
+type Decoder interface {
+ // Reads a null value. Returns a decoded value and an error if it occurs.
+ ReadNull() (interface{}, error)
+
+ // Reads a boolean value. Returns a decoded value and an error if it occurs.
+ ReadBoolean() (bool, error)
+
+	// Reads an int value. Returns a decoded value and an error if it occurs.
+ ReadInt() (int32, error)
+
+ // Reads a long value. Returns a decoded value and an error if it occurs.
+ ReadLong() (int64, error)
+
+ // Reads a float value. Returns a decoded value and an error if it occurs.
+ ReadFloat() (float32, error)
+
+ // Reads a double value. Returns a decoded value and an error if it occurs.
+ ReadDouble() (float64, error)
+
+ // Reads a bytes value. Returns a decoded value and an error if it occurs.
+ ReadBytes() ([]byte, error)
+
+ // Reads a string value. Returns a decoded value and an error if it occurs.
+ ReadString() (string, error)
+
+ // Reads an enum value (which is an Avro int value). Returns a decoded value and an error if it occurs.
+ ReadEnum() (int32, error)
+
+	// Reads and returns the size of the first block of an array. If this call returns non-zero, the caller
+	// should read the indicated number of items and then call ArrayNext() to find out the number of items in the
+	// next block. Returns a decoded value and an error if it occurs.
+ ReadArrayStart() (int64, error)
+
+ // Processes the next block of an array and returns the number of items in the block.
+ // Returns a decoded value and an error if it occurs.
+ ArrayNext() (int64, error)
+
+	// Reads and returns the size of the first block of map entries. If this call returns non-zero, the caller
+	// should read the indicated number of items and then call MapNext() to find out the number of items in the
+	// next block. Usage is similar to ReadArrayStart(). Returns a decoded value and an error if it occurs.
+ ReadMapStart() (int64, error)
+
+ // Processes the next block of map entries and returns the number of items in the block.
+ // Returns a decoded value and an error if it occurs.
+ MapNext() (int64, error)
+
+ // Reads fixed sized binary object into the provided buffer.
+ // Returns an error if it occurs.
+ ReadFixed([]byte) error
+
+ // Reads fixed sized binary object into the provided buffer.
+ // The second parameter is the position where the data needs to be written, the third is the size of binary object.
+ // Returns an error if it occurs.
+ ReadFixedWithBounds([]byte, int, int) error
+
+	// SetBlock is used for Avro Object Container Files, where the data is split into blocks. It sets a data block
+	// for this decoder and moves the reading position to the start of that block.
+ SetBlock(*DataBlock)
+
+	// Seek sets the reading position of this Decoder to a given value, allowing items to be skipped.
+ Seek(int64)
+
+ // Tell returns the current reading position of this Decoder.
+ Tell() int64
+}
+
+// DataBlock is a structure that holds a certain amount of entries and the actual buffer to read from.
+type DataBlock struct {
+ // Actual data
+ Data []byte
+
+ // Number of entries encoded in Data.
+ NumEntries int64
+
+ // Size of data buffer in bytes.
+ BlockSize int
+
+ // Number of unread entries in this DataBlock.
+ BlockRemaining int64
+}
+
+var maxIntBufSize = 5
+var maxLongBufSize = 10
+
+// BinaryDecoder implements Decoder and provides low-level support for deserializing Avro values.
+type BinaryDecoder struct {
+ buf []byte
+ pos int64
+}
+
+// NewBinaryDecoder creates a new BinaryDecoder to read from a given buffer.
+func NewBinaryDecoder(buf []byte) *BinaryDecoder {
+ return &BinaryDecoder{buf, 0}
+}
+
+// ReadNull reads a null value. Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadNull() (interface{}, error) {
+ return nil, nil
+}
+
+// ReadInt reads an int value. Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadInt() (int32, error) {
+ if err := checkEOF(bd.buf, bd.pos, 1); err != nil {
+ return 0, EOF
+ }
+ var value uint32
+ var b uint8
+ var offset int
+ bufLen := int64(len(bd.buf))
+
+ for {
+ if offset == maxIntBufSize {
+ return 0, IntOverflow
+ }
+
+ if bd.pos >= bufLen {
+ return 0, InvalidInt
+ }
+
+ b = bd.buf[bd.pos]
+ value |= uint32(b&0x7F) << uint(7*offset)
+ bd.pos++
+ offset++
+ if b&0x80 == 0 {
+ break
+ }
+ }
+ return int32((value >> 1) ^ -(value & 1)), nil
+}
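+
+// Worked example of the zig-zag varint decoding above: the single byte 0x01
+// decodes to -1, 0x02 decodes to 1, and the two-byte sequence 0x80 0x01
+// decodes to 64. For instance:
+//
+//	n, _ := NewBinaryDecoder([]byte{0x80, 0x01}).ReadInt() // n == int32(64)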
+
+// ReadLong reads a long value. Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadLong() (int64, error) {
+ var value uint64
+ var b uint8
+ var offset int
+ bufLen := int64(len(bd.buf))
+
+ for {
+ if offset == maxLongBufSize {
+ return 0, LongOverflow
+ }
+
+ if bd.pos >= bufLen {
+ return 0, InvalidLong
+ }
+
+ b = bd.buf[bd.pos]
+ value |= uint64(b&0x7F) << uint(7*offset)
+ bd.pos++
+ offset++
+
+ if b&0x80 == 0 {
+ break
+ }
+ }
+ return int64((value >> 1) ^ -(value & 1)), nil
+}
+
+// ReadString reads a string value. Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadString() (string, error) {
+ if err := checkEOF(bd.buf, bd.pos, 1); err != nil {
+ return "", err
+ }
+ length, err := bd.ReadLong()
+ if err != nil || length < 0 {
+ return "", InvalidStringLength
+ }
+ if err := checkEOF(bd.buf, bd.pos, int(length)); err != nil {
+ return "", err
+ }
+ value := string(bd.buf[bd.pos : bd.pos+length])
+ bd.pos += length
+ return value, nil
+}
+
+// ReadBoolean reads a boolean value. Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadBoolean() (bool, error) {
+ b := bd.buf[bd.pos] & 0xFF
+ bd.pos++
+ var err error
+ if b != 0 && b != 1 {
+ err = InvalidBool
+ }
+ return b == 1, err
+}
+
+// ReadBytes reads a bytes value. Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadBytes() ([]byte, error) {
+ //TODO make something with these if's!!
+ if err := checkEOF(bd.buf, bd.pos, 1); err != nil {
+ return nil, EOF
+ }
+ length, err := bd.ReadLong()
+ if err != nil {
+ return nil, err
+ }
+ if length < 0 {
+ return nil, NegativeBytesLength
+ }
+ if err = checkEOF(bd.buf, bd.pos, int(length)); err != nil {
+ return nil, EOF
+ }
+
+ bytes := make([]byte, length)
+ copy(bytes[:], bd.buf[bd.pos:bd.pos+length])
+ bd.pos += length
+ return bytes, err
+}
+
+// ReadFloat reads a float value. Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadFloat() (float32, error) {
+ var float float32
+ if err := checkEOF(bd.buf, bd.pos, 4); err != nil {
+ return float, err
+ }
+ bits := binary.LittleEndian.Uint32(bd.buf[bd.pos : bd.pos+4])
+ float = math.Float32frombits(bits)
+ bd.pos += 4
+ return float, nil
+}
+
+// ReadDouble reads a double value. Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadDouble() (float64, error) {
+ var double float64
+ if err := checkEOF(bd.buf, bd.pos, 8); err != nil {
+ return double, err
+ }
+ bits := binary.LittleEndian.Uint64(bd.buf[bd.pos : bd.pos+8])
+ double = math.Float64frombits(bits)
+ bd.pos += 8
+ return double, nil
+}
+
+// ReadEnum reads an enum value (which is an Avro int value). Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadEnum() (int32, error) {
+ return bd.ReadInt()
+}
+
+// ReadArrayStart reads and returns the size of the first block of an array. If this call returns non-zero, the caller
+// should read the indicated number of items and then call ArrayNext() to find out the number of items in the
+// next block. Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadArrayStart() (int64, error) {
+ return bd.readItemCount()
+}
+
+// ArrayNext processes the next block of an array and returns the number of items in the block.
+// Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ArrayNext() (int64, error) {
+ return bd.readItemCount()
+}
+
+// ReadMapStart reads and returns the size of the first block of map entries. If this call returns non-zero, the caller
+// should read the indicated number of items and then call MapNext() to find out the number of items in the
+// next block. Usage is similar to ReadArrayStart(). Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) ReadMapStart() (int64, error) {
+ return bd.readItemCount()
+}
+
+// MapNext processes the next block of map entries and returns the number of items in the block.
+// Returns a decoded value and an error if it occurs.
+func (bd *BinaryDecoder) MapNext() (int64, error) {
+ return bd.readItemCount()
+}
+
+// ReadFixed reads a fixed-size binary object into the provided buffer.
+// Returns an error if it occurs.
+func (bd *BinaryDecoder) ReadFixed(bytes []byte) error {
+ return bd.readBytes(bytes, 0, len(bytes))
+}
+
+// ReadFixedWithBounds reads a fixed-size binary object into the provided buffer.
+// The second parameter is the position at which the data should be written, and the third is the size of the binary object.
+// Returns an error if it occurs.
+func (bd *BinaryDecoder) ReadFixedWithBounds(bytes []byte, start int, length int) error {
+ return bd.readBytes(bytes, start, length)
+}
+
+// SetBlock is used for Avro Object Container Files, where the data is split into blocks. It sets the given data block
+// for this decoder and moves the reading position to the start of that block.
+func (bd *BinaryDecoder) SetBlock(block *DataBlock) {
+ bd.buf = block.Data
+ bd.Seek(0)
+}
+
+// Seek sets the reading position of this Decoder to a given value, allowing the caller to skip items.
+func (bd *BinaryDecoder) Seek(pos int64) {
+ bd.pos = pos
+}
+
+// Tell returns the current reading position of this Decoder.
+func (bd *BinaryDecoder) Tell() int64 {
+ return bd.pos
+}
+
+func checkEOF(buf []byte, pos int64, length int) error {
+ if int64(len(buf)) < pos+int64(length) {
+ return EOF
+ }
+ return nil
+}
+
+func (bd *BinaryDecoder) readItemCount() (int64, error) {
+ count, err := bd.ReadLong()
+ if err != nil {
+ return 0, err
+ }
+
+	if count < 0 {
+		// A negative block count is followed by the block size in bytes;
+		// read and discard the size, then negate the count.
+		_, err = bd.ReadLong()
+		if err != nil {
+			return 0, err
+		}
+		count = -count
+	}
+ return count, err
+}
+
+func (bd *BinaryDecoder) readBytes(bytes []byte, start int, length int) error {
+ if length < 0 {
+ return NegativeBytesLength
+ }
+ if err := checkEOF(bd.buf, bd.pos, int(start+length)); err != nil {
+ return EOF
+ }
+ copy(bytes[:], bd.buf[bd.pos+int64(start):bd.pos+int64(start)+int64(length)])
+ bd.pos += int64(length)
+
+ return nil
+}
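
The blocked array/map protocol above (ReadArrayStart, then ArrayNext until a zero count) is easy to get wrong, so here is a minimal sketch of the calling loop. It assumes the package's `NewBinaryDecoder([]byte)` constructor from earlier in this file; the example bytes encode a one-element array containing the string "hi".

```go
package main

import (
	"fmt"

	avro "github.com/go-avro/avro"
)

// readStringArray drains a blocked Avro array of strings: read a block count,
// read that many items, then ask for the next block until the count is zero.
func readStringArray(dec *avro.BinaryDecoder) ([]string, error) {
	var out []string
	count, err := dec.ReadArrayStart()
	if err != nil {
		return nil, err
	}
	for count > 0 {
		for i := int64(0); i < count; i++ {
			s, err := dec.ReadString()
			if err != nil {
				return nil, err
			}
			out = append(out, s)
		}
		if count, err = dec.ArrayNext(); err != nil {
			return nil, err
		}
	}
	return out, nil
}

func main() {
	// 0x02 = block of one item, then the string "hi" (zig-zag length 0x04), then 0x00 = end of array.
	// NewBinaryDecoder is assumed here; it is defined earlier in decoder.go, outside this excerpt.
	dec := avro.NewBinaryDecoder([]byte{0x02, 0x04, 'h', 'i', 0x00})
	fmt.Println(readStringArray(dec))
}
```
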
diff --git a/vendor/github.com/go-avro/avro/encoder.go b/vendor/github.com/go-avro/avro/encoder.go
new file mode 100644
index 00000000..23a080f2
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/encoder.go
@@ -0,0 +1,186 @@
+package avro
+
+import (
+ "encoding/binary"
+ "io"
+ "math"
+)
+
+// Encoder is an interface that provides low-level support for serializing Avro values.
+type Encoder interface {
+	// WriteNull writes a null value. Doesn't actually do anything but may advance the state of the Encoder implementation if it
+	// is stateful.
+	WriteNull(interface{})
+
+	// WriteBoolean writes a boolean value.
+	WriteBoolean(bool)
+
+	// WriteInt writes an int value.
+	WriteInt(int32)
+
+	// WriteLong writes a long value.
+	WriteLong(int64)
+
+	// WriteFloat writes a float value.
+	WriteFloat(float32)
+
+	// WriteDouble writes a double value.
+	WriteDouble(float64)
+
+	// WriteBytes writes a bytes value.
+	WriteBytes([]byte)
+
+	// WriteString writes a string value.
+	WriteString(string)
+
+ // WriteArrayStart should be called when starting to serialize an array providing it with a number of items in
+ // array block.
+ WriteArrayStart(int64)
+
+ // WriteArrayNext should be called after finishing writing an array block either passing it the number of items in
+ // next block or 0 indicating the end of array.
+ WriteArrayNext(int64)
+
+ // WriteMapStart should be called when starting to serialize a map providing it with a number of items in
+ // map block.
+ WriteMapStart(int64)
+
+ // WriteMapNext should be called after finishing writing a map block either passing it the number of items in
+ // next block or 0 indicating the end of map.
+ WriteMapNext(int64)
+
+	// WriteRaw writes raw bytes to this Encoder.
+	WriteRaw([]byte)
+}
+
+// BinaryEncoder implements Encoder and provides low-level support for serializing Avro values.
+type BinaryEncoder struct {
+ buffer io.Writer
+}
+
+// NewBinaryEncoder creates a new BinaryEncoder that will write to a given io.Writer.
+func NewBinaryEncoder(buffer io.Writer) *BinaryEncoder {
+ return &BinaryEncoder{buffer: buffer}
+}
+
+// WriteNull writes a null value. Doesn't actually do anything in this implementation.
+func (be *BinaryEncoder) WriteNull(_ interface{}) {
+ //do nothing
+}
+
+// The encodings of true and false, for reuse
+var encBoolTrue = []byte{0x01}
+var encBoolFalse = []byte{0x00}
+
+// WriteBoolean writes a boolean value.
+func (be *BinaryEncoder) WriteBoolean(x bool) {
+ if x {
+ _, _ = be.buffer.Write(encBoolTrue)
+ } else {
+ _, _ = be.buffer.Write(encBoolFalse)
+ }
+}
+
+// WriteInt writes an int value.
+func (be *BinaryEncoder) WriteInt(x int32) {
+ _, _ = be.buffer.Write(be.encodeVarint32(x))
+}
+
+// WriteLong writes a long value.
+func (be *BinaryEncoder) WriteLong(x int64) {
+ _, _ = be.buffer.Write(be.encodeVarint64(x))
+}
+
+// WriteFloat writes a float value.
+func (be *BinaryEncoder) WriteFloat(x float32) {
+ bytes := make([]byte, 4)
+ binary.LittleEndian.PutUint32(bytes, math.Float32bits(x))
+ _, _ = be.buffer.Write(bytes)
+}
+
+// WriteDouble writes a double value.
+func (be *BinaryEncoder) WriteDouble(x float64) {
+ bytes := make([]byte, 8)
+ binary.LittleEndian.PutUint64(bytes, math.Float64bits(x))
+ _, _ = be.buffer.Write(bytes)
+}
+
+// WriteRaw writes raw bytes to this Encoder.
+func (be *BinaryEncoder) WriteRaw(x []byte) {
+ _, _ = be.buffer.Write(x)
+}
+
+// WriteBytes writes a bytes value.
+func (be *BinaryEncoder) WriteBytes(x []byte) {
+ be.WriteLong(int64(len(x)))
+ _, _ = be.buffer.Write(x)
+}
+
+// WriteString writes a string value.
+func (be *BinaryEncoder) WriteString(x string) {
+ be.WriteLong(int64(len(x)))
+	// io.WriteString uses the writer's own WriteString method when available, avoiding an extra []byte copy of the string.
+ _, _ = io.WriteString(be.buffer, x)
+}
+
+// WriteArrayStart should be called when starting to serialize an array providing it with a number of items in
+// array block.
+func (be *BinaryEncoder) WriteArrayStart(count int64) {
+ be.writeItemCount(count)
+}
+
+// WriteArrayNext should be called after finishing writing an array block either passing it the number of items in
+// next block or 0 indicating the end of array.
+func (be *BinaryEncoder) WriteArrayNext(count int64) {
+ be.writeItemCount(count)
+}
+
+// WriteMapStart should be called when starting to serialize a map providing it with a number of items in
+// map block.
+func (be *BinaryEncoder) WriteMapStart(count int64) {
+ be.writeItemCount(count)
+}
+
+// WriteMapNext should be called after finishing writing a map block either passing it the number of items in
+// next block or 0 indicating the end of map.
+func (be *BinaryEncoder) WriteMapNext(count int64) {
+ be.writeItemCount(count)
+}
+
+func (be *BinaryEncoder) writeItemCount(count int64) {
+ be.WriteLong(count)
+}
+
+// encodeVarint32 zig-zag encodes n and emits it as an Avro variable-length integer.
+func (be *BinaryEncoder) encodeVarint32(n int32) []byte {
+	var buf [5]byte
+	// Zig-zag encoding maps signed values to unsigned ones so that small magnitudes stay small.
+	ux := uint32(n) << 1
+	if n < 0 {
+		ux = ^ux
+	}
+	// Emit 7 bits per byte, setting the continuation bit on all but the final byte.
+	i := 0
+	for ux >= 0x80 {
+		buf[i] = byte(ux) | 0x80
+		ux >>= 7
+		i++
+	}
+	buf[i] = byte(ux)
+
+	return buf[0 : i+1]
+}
+
+// encodeVarint64 zig-zag encodes x and emits it as an Avro variable-length long.
+func (be *BinaryEncoder) encodeVarint64(x int64) []byte {
+ var buf [10]byte
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+ i := 0
+ for ux >= 0x80 {
+ buf[i] = byte(ux) | 0x80
+ ux >>= 7
+ i++
+ }
+ buf[i] = byte(ux)
+
+ return buf[0 : i+1]
+}
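
As a quick illustration of the zig-zag varint encoding implemented by encodeVarint32/encodeVarint64, here is a small sketch that writes a few values through BinaryEncoder into a bytes.Buffer and prints the raw bytes; the values and the expected output in the comments are illustrative only.

```go
package main

import (
	"bytes"
	"fmt"

	avro "github.com/go-avro/avro"
)

func main() {
	var buf bytes.Buffer
	enc := avro.NewBinaryEncoder(&buf)

	enc.WriteLong(-3)       // zig-zag encodes -3 as 5, emitted as the single byte 0x05
	enc.WriteString("avro") // length-prefixed: 0x08 (zig-zag of 4) followed by the UTF-8 bytes
	enc.WriteBoolean(true)  // 0x01

	fmt.Printf("% x\n", buf.Bytes()) // 05 08 61 76 72 6f 01
}
```
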
diff --git a/vendor/github.com/go-avro/avro/errors.go b/vendor/github.com/go-avro/avro/errors.go
new file mode 100644
index 00000000..5d2ecd78
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/errors.go
@@ -0,0 +1,62 @@
+package avro
+
+import (
+ "errors"
+ "fmt"
+)
+
+// EOF signals that an end of file or stream has been reached unexpectedly.
+var EOF = errors.New("End of file reached")
+
+// IntOverflow happens when the given value to decode overflows the maximum int32 value.
+var IntOverflow = errors.New("Overflowed an int value")
+
+// LongOverflow happens when the given value to decode overflows the maximum int64 value.
+var LongOverflow = errors.New("Overflowed a long value")
+
+// NegativeBytesLength happens when a given value to decode as bytes has a negative length.
+var NegativeBytesLength = errors.New("Negative bytes length")
+
+// InvalidBool happens when a given value to decode as bool is neither 0x00 nor 0x01.
+var InvalidBool = errors.New("Invalid bool value")
+
+// InvalidInt happens when a given value to decode as an int is invalid.
+var InvalidInt = errors.New("Invalid int value")
+
+// InvalidLong happens when a given value to decode as a long is invalid.
+var InvalidLong = errors.New("Invalid long value")
+
+// InvalidStringLength happens when a given value to decode as a string has either a negative or an undecodable length.
+var InvalidStringLength = errors.New("Invalid string length")
+
+// NotAvroFile indicates the given file to decode does not correspond to the Avro data file format.
+var NotAvroFile = errors.New("Not an Avro data file")
+
+// InvalidSync happens when the file header's sync and a block's sync do not match - indicates corrupted data.
+var InvalidSync = errors.New("Invalid sync")
+
+// BlockNotFinished happens when trying to read the next block without finishing the previous one.
+var BlockNotFinished = errors.New("Block read is unfinished")
+
+// InvalidFixedSize happens when an Avro schema contains an invalid value for fixed size.
+var InvalidFixedSize = errors.New("Invalid Fixed type size")
+
+// InvalidValueType happens when an Avro schema contains an invalid value for a map value type or array item type.
+var InvalidValueType = errors.New("Invalid array or map value type")
+
+// NestedUnionsNotAllowed happens when an Avro schema contains a union within a union.
+var NestedUnionsNotAllowed = errors.New("Nested unions are not allowed")
+
+// UnionTypeOverflow happens when the numeric index of the union type is invalid.
+var UnionTypeOverflow = errors.New("Union type overflow")
+
+// InvalidSchema happens when an Avro schema is unparsable or is invalid in any other way.
+var InvalidSchema = errors.New("Invalid schema")
+
+// SchemaNotSet happens when a datum reader has no set schema.
+var SchemaNotSet = errors.New("Schema not set")
+
+// NewFieldDoesNotExistError returns an error with a custom message indicating which necessary field in the struct is missing.
+func NewFieldDoesNotExistError(field string) error {
+	return fmt.Errorf("Field does not exist: [%v]", field)
+}
diff --git a/vendor/github.com/go-avro/avro/generic_record.go b/vendor/github.com/go-avro/avro/generic_record.go
new file mode 100644
index 00000000..e8c5976e
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/generic_record.go
@@ -0,0 +1,86 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+package avro
+
+import "encoding/json"
+
+// AvroRecord is an interface for anything that has an Avro schema and can be serialized/deserialized by this library.
+type AvroRecord interface {
+ // Schema returns an Avro schema for this AvroRecord.
+ Schema() Schema
+}
+
+// GenericRecord is a generic instance of a record schema.
+// Fields are accessible by their name.
+type GenericRecord struct {
+ fields map[string]interface{}
+ schema Schema
+}
+
+// NewGenericRecord creates a new GenericRecord.
+func NewGenericRecord(schema Schema) *GenericRecord {
+ return &GenericRecord{
+ fields: make(map[string]interface{}),
+ schema: schema,
+ }
+}
+
+// Get gets a value by its name.
+func (gr *GenericRecord) Get(name string) interface{} {
+ return gr.fields[name]
+}
+
+// Set sets a value for a given name.
+func (gr *GenericRecord) Set(name string, value interface{}) {
+ gr.fields[name] = value
+}
+
+// Schema returns a schema for this GenericRecord.
+func (gr *GenericRecord) Schema() Schema {
+ return gr.schema
+}
+
+// String returns a JSON representation of this GenericRecord.
+func (gr *GenericRecord) String() string {
+ m := gr.Map()
+ buf, err := json.Marshal(m)
+ if err != nil {
+ panic(err)
+ }
+ return string(buf)
+}
+
+// Map returns a map representation of this GenericRecord.
+func (gr *GenericRecord) Map() map[string]interface{} {
+ m := make(map[string]interface{})
+ for k, v := range gr.fields {
+ if r, ok := v.(*GenericRecord); ok {
+ v = r.Map()
+ }
+ if a, ok := v.([]interface{}); ok {
+ slice := make([]interface{}, len(a))
+ for i, elem := range a {
+ if rec, ok := elem.(*GenericRecord); ok {
+ elem = rec.Map()
+ }
+ slice[i] = elem
+ }
+ v = slice
+ }
+ m[k] = v
+ }
+ return m
+}
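
A minimal sketch of how GenericRecord is typically used together with a parsed schema. The record name and field names here are illustrative, not part of this API; MustParseSchema comes from this package's schema.go, added later in this diff.

```go
package main

import (
	"fmt"

	avro "github.com/go-avro/avro"
)

func main() {
	// Illustrative record schema; any record schema works here.
	schema := avro.MustParseSchema(`{
		"type": "record",
		"name": "Download",
		"fields": [
			{"name": "dataset_id", "type": "string"},
			{"name": "edition", "type": "string"}
		]
	}`)

	rec := avro.NewGenericRecord(schema)
	rec.Set("dataset_id", "cpih01")
	rec.Set("edition", "time-series")

	fmt.Println(rec.Get("dataset_id")) // cpih01
	fmt.Println(rec)                   // JSON produced via Map()
}
```
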
diff --git a/vendor/github.com/go-avro/avro/schema.go b/vendor/github.com/go-avro/avro/schema.go
new file mode 100644
index 00000000..b6d09119
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/schema.go
@@ -0,0 +1,1183 @@
+package avro
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "strings"
+)
+
+// ***********************
+// NOTICE this file was changed beginning in November 2016 by the team maintaining
+// https://github.com/go-avro/avro. This notice is required to be here due to the
+// terms of the Apache license, see LICENSE for details.
+// ***********************
+
+const (
+ // Record schema type constant
+ Record int = iota
+
+ // Enum schema type constant
+ Enum
+
+ // Array schema type constant
+ Array
+
+ // Map schema type constant
+ Map
+
+ // Union schema type constant
+ Union
+
+ // Fixed schema type constant
+ Fixed
+
+ // String schema type constant
+ String
+
+ // Bytes schema type constant
+ Bytes
+
+ // Int schema type constant
+ Int
+
+ // Long schema type constant
+ Long
+
+ // Float schema type constant
+ Float
+
+ // Double schema type constant
+ Double
+
+ // Boolean schema type constant
+ Boolean
+
+ // Null schema type constant
+ Null
+
+ // Recursive schema type constant. Recursive is an artificial type that means a Record schema without its definition
+ // that should be looked up in some registry.
+ Recursive
+)
+
+const (
+ typeRecord = "record"
+ typeUnion = "union"
+ typeEnum = "enum"
+ typeArray = "array"
+ typeMap = "map"
+ typeFixed = "fixed"
+ typeString = "string"
+ typeBytes = "bytes"
+ typeInt = "int"
+ typeLong = "long"
+ typeFloat = "float"
+ typeDouble = "double"
+ typeBoolean = "boolean"
+ typeNull = "null"
+)
+
+const (
+ schemaAliasesField = "aliases"
+ schemaDefaultField = "default"
+ schemaDocField = "doc"
+ schemaFieldsField = "fields"
+ schemaItemsField = "items"
+ schemaNameField = "name"
+ schemaNamespaceField = "namespace"
+ schemaSizeField = "size"
+ schemaSymbolsField = "symbols"
+ schemaTypeField = "type"
+ schemaValuesField = "values"
+)
+
+// Schema is an interface representing a single Avro schema (both primitive and complex).
+type Schema interface {
+ // Returns an integer constant representing this schema type.
+ Type() int
+
+ // If this is a record, enum or fixed, returns its name, otherwise the name of the primitive type.
+ GetName() string
+
+ // Gets a custom non-reserved property from this schema and a bool representing if it exists.
+ Prop(key string) (interface{}, bool)
+
+ // Converts this schema to its JSON representation.
+ String() string
+
+ // Checks whether the given value is writeable to this schema.
+ Validate(v reflect.Value) bool
+}
+
+// StringSchema implements Schema and represents Avro string type.
+type StringSchema struct{}
+
+// String returns a JSON representation of StringSchema.
+func (*StringSchema) String() string {
+ return `{"type": "string"}`
+}
+
+// Type returns a type constant for this StringSchema.
+func (*StringSchema) Type() int {
+ return String
+}
+
+// GetName returns a type name for this StringSchema.
+func (*StringSchema) GetName() string {
+ return typeString
+}
+
+// Prop doesn't return anything valuable for StringSchema.
+func (*StringSchema) Prop(key string) (interface{}, bool) {
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (*StringSchema) Validate(v reflect.Value) bool {
+ _, ok := dereference(v).Interface().(string)
+ return ok
+}
+
+// MarshalJSON serializes the given schema as JSON. Never returns an error.
+func (*StringSchema) MarshalJSON() ([]byte, error) {
+ return []byte(`"string"`), nil
+}
+
+// BytesSchema implements Schema and represents Avro bytes type.
+type BytesSchema struct{}
+
+// String returns a JSON representation of BytesSchema.
+func (*BytesSchema) String() string {
+ return `{"type": "bytes"}`
+}
+
+// Type returns a type constant for this BytesSchema.
+func (*BytesSchema) Type() int {
+ return Bytes
+}
+
+// GetName returns a type name for this BytesSchema.
+func (*BytesSchema) GetName() string {
+ return typeBytes
+}
+
+// Prop doesn't return anything valuable for BytesSchema.
+func (*BytesSchema) Prop(key string) (interface{}, bool) {
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (*BytesSchema) Validate(v reflect.Value) bool {
+ v = dereference(v)
+
+ return v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8
+}
+
+// MarshalJSON serializes the given schema as JSON. Never returns an error.
+func (*BytesSchema) MarshalJSON() ([]byte, error) {
+ return []byte(`"bytes"`), nil
+}
+
+// IntSchema implements Schema and represents Avro int type.
+type IntSchema struct{}
+
+// String returns a JSON representation of IntSchema.
+func (*IntSchema) String() string {
+ return `{"type": "int"}`
+}
+
+// Type returns a type constant for this IntSchema.
+func (*IntSchema) Type() int {
+ return Int
+}
+
+// GetName returns a type name for this IntSchema.
+func (*IntSchema) GetName() string {
+ return typeInt
+}
+
+// Prop doesn't return anything valuable for IntSchema.
+func (*IntSchema) Prop(key string) (interface{}, bool) {
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (*IntSchema) Validate(v reflect.Value) bool {
+ return reflect.TypeOf(dereference(v).Interface()).Kind() == reflect.Int32
+}
+
+// MarshalJSON serializes the given schema as JSON. Never returns an error.
+func (*IntSchema) MarshalJSON() ([]byte, error) {
+ return []byte(`"int"`), nil
+}
+
+// LongSchema implements Schema and represents Avro long type.
+type LongSchema struct{}
+
+// String returns a JSON representation of LongSchema.
+func (*LongSchema) String() string {
+ return `{"type": "long"}`
+}
+
+// Type returns a type constant for this LongSchema.
+func (*LongSchema) Type() int {
+ return Long
+}
+
+// GetName returns a type name for this LongSchema.
+func (*LongSchema) GetName() string {
+ return typeLong
+}
+
+// Prop doesn't return anything valuable for LongSchema.
+func (*LongSchema) Prop(key string) (interface{}, bool) {
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (*LongSchema) Validate(v reflect.Value) bool {
+ return reflect.TypeOf(dereference(v).Interface()).Kind() == reflect.Int64
+}
+
+// MarshalJSON serializes the given schema as JSON. Never returns an error.
+func (*LongSchema) MarshalJSON() ([]byte, error) {
+ return []byte(`"long"`), nil
+}
+
+// FloatSchema implements Schema and represents Avro float type.
+type FloatSchema struct{}
+
+// String returns a JSON representation of FloatSchema.
+func (*FloatSchema) String() string {
+ return `{"type": "float"}`
+}
+
+// Type returns a type constant for this FloatSchema.
+func (*FloatSchema) Type() int {
+ return Float
+}
+
+// GetName returns a type name for this FloatSchema.
+func (*FloatSchema) GetName() string {
+ return typeFloat
+}
+
+// Prop doesn't return anything valuable for FloatSchema.
+func (*FloatSchema) Prop(key string) (interface{}, bool) {
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (*FloatSchema) Validate(v reflect.Value) bool {
+ return reflect.TypeOf(dereference(v).Interface()).Kind() == reflect.Float32
+}
+
+// MarshalJSON serializes the given schema as JSON. Never returns an error.
+func (*FloatSchema) MarshalJSON() ([]byte, error) {
+ return []byte(`"float"`), nil
+}
+
+// DoubleSchema implements Schema and represents Avro double type.
+type DoubleSchema struct{}
+
+// String returns a JSON representation of DoubleSchema.
+func (*DoubleSchema) String() string {
+ return `{"type": "double"}`
+}
+
+// Type returns a type constant for this DoubleSchema.
+func (*DoubleSchema) Type() int {
+ return Double
+}
+
+// GetName returns a type name for this DoubleSchema.
+func (*DoubleSchema) GetName() string {
+ return typeDouble
+}
+
+// Prop doesn't return anything valuable for DoubleSchema.
+func (*DoubleSchema) Prop(key string) (interface{}, bool) {
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (*DoubleSchema) Validate(v reflect.Value) bool {
+ return reflect.TypeOf(dereference(v).Interface()).Kind() == reflect.Float64
+}
+
+// MarshalJSON serializes the given schema as JSON. Never returns an error.
+func (*DoubleSchema) MarshalJSON() ([]byte, error) {
+ return []byte(`"double"`), nil
+}
+
+// BooleanSchema implements Schema and represents Avro boolean type.
+type BooleanSchema struct{}
+
+// String returns a JSON representation of BooleanSchema.
+func (*BooleanSchema) String() string {
+ return `{"type": "boolean"}`
+}
+
+// Type returns a type constant for this BooleanSchema.
+func (*BooleanSchema) Type() int {
+ return Boolean
+}
+
+// GetName returns a type name for this BooleanSchema.
+func (*BooleanSchema) GetName() string {
+ return typeBoolean
+}
+
+// Prop doesn't return anything valuable for BooleanSchema.
+func (*BooleanSchema) Prop(key string) (interface{}, bool) {
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (*BooleanSchema) Validate(v reflect.Value) bool {
+ return reflect.TypeOf(dereference(v).Interface()).Kind() == reflect.Bool
+}
+
+// MarshalJSON serializes the given schema as JSON. Never returns an error.
+func (*BooleanSchema) MarshalJSON() ([]byte, error) {
+ return []byte(`"boolean"`), nil
+}
+
+// NullSchema implements Schema and represents Avro null type.
+type NullSchema struct{}
+
+// String returns a JSON representation of NullSchema.
+func (*NullSchema) String() string {
+ return `{"type": "null"}`
+}
+
+// Type returns a type constant for this NullSchema.
+func (*NullSchema) Type() int {
+ return Null
+}
+
+// GetName returns a type name for this NullSchema.
+func (*NullSchema) GetName() string {
+ return typeNull
+}
+
+// Prop doesn't return anything valuable for NullSchema.
+func (*NullSchema) Prop(key string) (interface{}, bool) {
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (*NullSchema) Validate(v reflect.Value) bool {
+ // Check if the value is something that can be null
+ switch v.Kind() {
+ case reflect.Interface:
+ return v.IsNil()
+ case reflect.Array:
+ return v.Cap() == 0
+ case reflect.Slice:
+ return v.IsNil() || v.Cap() == 0
+ case reflect.Map:
+ return len(v.MapKeys()) == 0
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Float32:
+ // Should NaN floats be treated as null?
+ return math.IsNaN(v.Float())
+ case reflect.Float64:
+ // Should NaN floats be treated as null?
+ return math.IsNaN(v.Float())
+ case reflect.Ptr:
+ return v.IsNil()
+ case reflect.Invalid:
+ return true
+ }
+
+ // Nothing else in particular, so this should not validate?
+ return false
+}
+
+// MarshalJSON serializes the given schema as JSON. Never returns an error.
+func (*NullSchema) MarshalJSON() ([]byte, error) {
+ return []byte(`"null"`), nil
+}
+
+// RecordSchema implements Schema and represents Avro record type.
+type RecordSchema struct {
+ Name string `json:"name,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ Doc string `json:"doc,omitempty"`
+ Aliases []string `json:"aliases,omitempty"`
+ Properties map[string]interface{}
+ Fields []*SchemaField `json:"fields"`
+}
+
+// String returns a JSON representation of RecordSchema.
+func (s *RecordSchema) String() string {
+ bytes, err := json.MarshalIndent(s, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ return string(bytes)
+}
+
+// MarshalJSON serializes the given schema as JSON.
+func (s *RecordSchema) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string `json:"type,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ Name string `json:"name,omitempty"`
+ Doc string `json:"doc,omitempty"`
+ Aliases []string `json:"aliases,omitempty"`
+ Fields []*SchemaField `json:"fields"`
+ }{
+ Type: "record",
+ Namespace: s.Namespace,
+ Name: s.Name,
+ Doc: s.Doc,
+ Aliases: s.Aliases,
+ Fields: s.Fields,
+ })
+}
+
+// Type returns a type constant for this RecordSchema.
+func (*RecordSchema) Type() int {
+ return Record
+}
+
+// GetName returns a record name for this RecordSchema.
+func (s *RecordSchema) GetName() string {
+ return s.Name
+}
+
+// Prop gets a custom non-reserved property from this schema and a bool representing if it exists.
+func (s *RecordSchema) Prop(key string) (interface{}, bool) {
+ if s.Properties != nil {
+ if prop, ok := s.Properties[key]; ok {
+ return prop, true
+ }
+ }
+
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (s *RecordSchema) Validate(v reflect.Value) bool {
+ v = dereference(v)
+ if v.Kind() != reflect.Struct || !v.CanAddr() || !v.CanInterface() {
+ return false
+ }
+ rec, ok := v.Interface().(GenericRecord)
+ if !ok {
+ // This is not a generic record and is likely a specific record. Hence
+ // use the basic check.
+ return v.Kind() == reflect.Struct
+ }
+
+ fieldCount := 0
+ for key, val := range rec.fields {
+ for idx := range s.Fields {
+			// key must have s.Fields[idx].Name as a suffix
+ if len(s.Fields[idx].Name) <= len(key) {
+ lhs := key[len(key)-len(s.Fields[idx].Name):]
+ if lhs == s.Fields[idx].Name {
+ if !s.Fields[idx].Type.Validate(reflect.ValueOf(val)) {
+ return false
+ }
+ fieldCount++
+ break
+ }
+ }
+ }
+ }
+
+	// All of the fields that were set must be accounted for in the record schema.
+ if fieldCount < len(rec.fields) {
+ return false
+ }
+
+ return true
+}
+
+// RecursiveSchema implements Schema and represents Avro record type without a definition (e.g. that should be looked up).
+type RecursiveSchema struct {
+ Actual *RecordSchema
+}
+
+func newRecursiveSchema(parent *RecordSchema) *RecursiveSchema {
+ return &RecursiveSchema{
+ Actual: parent,
+ }
+}
+
+// String returns a JSON representation of RecursiveSchema.
+func (s *RecursiveSchema) String() string {
+ return fmt.Sprintf(`{"type": "%s"}`, s.Actual.GetName())
+}
+
+// Type returns a type constant for this RecursiveSchema.
+func (*RecursiveSchema) Type() int {
+ return Recursive
+}
+
+// GetName returns a record name for enclosed RecordSchema.
+func (s *RecursiveSchema) GetName() string {
+ return s.Actual.GetName()
+}
+
+// Prop doesn't return anything valuable for RecursiveSchema.
+func (*RecursiveSchema) Prop(key string) (interface{}, bool) {
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (s *RecursiveSchema) Validate(v reflect.Value) bool {
+ return s.Actual.Validate(v)
+}
+
+// MarshalJSON serializes the given schema as JSON. Never returns an error.
+func (s *RecursiveSchema) MarshalJSON() ([]byte, error) {
+ return []byte(fmt.Sprintf(`"%s"`, s.Actual.GetName())), nil
+}
+
+// SchemaField represents a schema field for Avro record.
+type SchemaField struct {
+ Name string `json:"name,omitempty"`
+ Doc string `json:"doc,omitempty"`
+ Default interface{} `json:"default"`
+ Type Schema `json:"type,omitempty"`
+ Properties map[string]interface{}
+}
+
+// Prop gets a custom non-reserved property from this SchemaField and a bool representing if it exists.
+func (s *SchemaField) Prop(key string) (interface{}, bool) {
+	if s.Properties != nil {
+		if prop, ok := s.Properties[key]; ok {
+			return prop, true
+		}
+	}
+	return nil, false
+}
+
+// MarshalJSON serializes the given schema field as JSON.
+func (s *SchemaField) MarshalJSON() ([]byte, error) {
+ if s.Type.Type() == Null || (s.Type.Type() == Union && s.Type.(*UnionSchema).Types[0].Type() == Null) {
+ return json.Marshal(struct {
+ Name string `json:"name,omitempty"`
+ Doc string `json:"doc,omitempty"`
+ Default interface{} `json:"default"`
+ Type Schema `json:"type,omitempty"`
+ }{
+ Name: s.Name,
+ Doc: s.Doc,
+ Default: s.Default,
+ Type: s.Type,
+ })
+ }
+
+ return json.Marshal(struct {
+ Name string `json:"name,omitempty"`
+ Doc string `json:"doc,omitempty"`
+ Default interface{} `json:"default,omitempty"`
+ Type Schema `json:"type,omitempty"`
+ }{
+ Name: s.Name,
+ Doc: s.Doc,
+ Default: s.Default,
+ Type: s.Type,
+ })
+}
+
+// String returns a JSON representation of SchemaField.
+func (s *SchemaField) String() string {
+ return fmt.Sprintf("[SchemaField: Name: %s, Doc: %s, Default: %v, Type: %s]", s.Name, s.Doc, s.Default, s.Type)
+}
+
+// EnumSchema implements Schema and represents Avro enum type.
+type EnumSchema struct {
+ Name string
+ Namespace string
+ Aliases []string
+ Doc string
+ Symbols []string
+ Properties map[string]interface{}
+}
+
+// String returns a JSON representation of EnumSchema.
+func (s *EnumSchema) String() string {
+ bytes, err := json.MarshalIndent(s, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ return string(bytes)
+}
+
+// Type returns a type constant for this EnumSchema.
+func (*EnumSchema) Type() int {
+ return Enum
+}
+
+// GetName returns an enum name for this EnumSchema.
+func (s *EnumSchema) GetName() string {
+ return s.Name
+}
+
+// Prop gets a custom non-reserved property from this schema and a bool representing if it exists.
+func (s *EnumSchema) Prop(key string) (interface{}, bool) {
+ if s.Properties != nil {
+ if prop, ok := s.Properties[key]; ok {
+ return prop, true
+ }
+ }
+
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (*EnumSchema) Validate(v reflect.Value) bool {
+ //TODO implement
+ return true
+}
+
+// MarshalJSON serializes the given schema as JSON.
+func (s *EnumSchema) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string `json:"type,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ Name string `json:"name,omitempty"`
+ Doc string `json:"doc,omitempty"`
+ Symbols []string `json:"symbols,omitempty"`
+ }{
+ Type: "enum",
+ Namespace: s.Namespace,
+ Name: s.Name,
+ Doc: s.Doc,
+ Symbols: s.Symbols,
+ })
+}
+
+// ArraySchema implements Schema and represents Avro array type.
+type ArraySchema struct {
+ Items Schema
+ Properties map[string]interface{}
+}
+
+// String returns a JSON representation of ArraySchema.
+func (s *ArraySchema) String() string {
+ bytes, err := json.MarshalIndent(s, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ return string(bytes)
+}
+
+// Type returns a type constant for this ArraySchema.
+func (*ArraySchema) Type() int {
+ return Array
+}
+
+// GetName returns a type name for this ArraySchema.
+func (*ArraySchema) GetName() string {
+ return typeArray
+}
+
+// Prop gets a custom non-reserved property from this schema and a bool representing if it exists.
+func (s *ArraySchema) Prop(key string) (interface{}, bool) {
+ if s.Properties != nil {
+ if prop, ok := s.Properties[key]; ok {
+ return prop, true
+ }
+ }
+
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (s *ArraySchema) Validate(v reflect.Value) bool {
+ v = dereference(v)
+
+ // This needs to be a slice
+ return v.Kind() == reflect.Slice || v.Kind() == reflect.Array
+}
+
+// MarshalJSON serializes the given schema as JSON.
+func (s *ArraySchema) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string `json:"type,omitempty"`
+ Items Schema `json:"items,omitempty"`
+ }{
+ Type: "array",
+ Items: s.Items,
+ })
+}
+
+// MapSchema implements Schema and represents Avro map type.
+type MapSchema struct {
+ Values Schema
+ Properties map[string]interface{}
+}
+
+// String returns a JSON representation of MapSchema.
+func (s *MapSchema) String() string {
+ bytes, err := json.MarshalIndent(s, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ return string(bytes)
+}
+
+// Type returns a type constant for this MapSchema.
+func (*MapSchema) Type() int {
+ return Map
+}
+
+// GetName returns a type name for this MapSchema.
+func (*MapSchema) GetName() string {
+ return typeMap
+}
+
+// Prop gets a custom non-reserved property from this schema and a bool representing if it exists.
+func (s *MapSchema) Prop(key string) (interface{}, bool) {
+ if s.Properties != nil {
+ if prop, ok := s.Properties[key]; ok {
+ return prop, true
+ }
+ }
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (s *MapSchema) Validate(v reflect.Value) bool {
+ v = dereference(v)
+
+ return v.Kind() == reflect.Map && v.Type().Key().Kind() == reflect.String
+}
+
+// MarshalJSON serializes the given schema as JSON.
+func (s *MapSchema) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string `json:"type,omitempty"`
+ Values Schema `json:"values,omitempty"`
+ }{
+ Type: "map",
+ Values: s.Values,
+ })
+}
+
+// UnionSchema implements Schema and represents Avro union type.
+type UnionSchema struct {
+ Types []Schema
+}
+
+// String returns a JSON representation of UnionSchema.
+func (s *UnionSchema) String() string {
+ bytes, err := json.MarshalIndent(s, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ return fmt.Sprintf(`{"type": %s}`, string(bytes))
+}
+
+// Type returns a type constant for this UnionSchema.
+func (*UnionSchema) Type() int {
+ return Union
+}
+
+// GetName returns a type name for this UnionSchema.
+func (*UnionSchema) GetName() string {
+ return typeUnion
+}
+
+// Prop doesn't return anything valuable for UnionSchema.
+func (*UnionSchema) Prop(key string) (interface{}, bool) {
+ return nil, false
+}
+
+// GetType gets the index of actual union type for a given value.
+func (s *UnionSchema) GetType(v reflect.Value) int {
+ if s.Types != nil {
+ for i := range s.Types {
+ if t := s.Types[i]; t.Validate(v) {
+ return i
+ }
+ }
+ }
+
+ return -1
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (s *UnionSchema) Validate(v reflect.Value) bool {
+ v = dereference(v)
+ for i := range s.Types {
+ if t := s.Types[i]; t.Validate(v) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// MarshalJSON serializes the given schema as JSON.
+func (s *UnionSchema) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.Types)
+}
+
+// FixedSchema implements Schema and represents Avro fixed type.
+type FixedSchema struct {
+ Namespace string
+ Name string
+ Size int
+ Properties map[string]interface{}
+}
+
+// String returns a JSON representation of FixedSchema.
+func (s *FixedSchema) String() string {
+ bytes, err := json.MarshalIndent(s, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ return string(bytes)
+}
+
+// Type returns a type constant for this FixedSchema.
+func (*FixedSchema) Type() int {
+ return Fixed
+}
+
+// GetName returns a fixed name for this FixedSchema.
+func (s *FixedSchema) GetName() string {
+ return s.Name
+}
+
+// Prop gets a custom non-reserved property from this schema and a bool representing if it exists.
+func (s *FixedSchema) Prop(key string) (interface{}, bool) {
+ if s.Properties != nil {
+ if prop, ok := s.Properties[key]; ok {
+ return prop, true
+ }
+ }
+ return nil, false
+}
+
+// Validate checks whether the given value is writeable to this schema.
+func (s *FixedSchema) Validate(v reflect.Value) bool {
+ v = dereference(v)
+
+ return (v.Kind() == reflect.Array || v.Kind() == reflect.Slice) && v.Type().Elem().Kind() == reflect.Uint8 && v.Len() == s.Size
+}
+
+// MarshalJSON serializes the given schema as JSON.
+func (s *FixedSchema) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string `json:"type,omitempty"`
+ Size int `json:"size,omitempty"`
+ Name string `json:"name,omitempty"`
+ }{
+ Type: "fixed",
+ Size: s.Size,
+ Name: s.Name,
+ })
+}
+
+// GetFullName returns a fully-qualified name for a schema if possible. The format is namespace.name.
+func GetFullName(schema Schema) string {
+ switch sch := schema.(type) {
+ case *RecordSchema:
+ return getFullName(sch.GetName(), sch.Namespace)
+ case *EnumSchema:
+ return getFullName(sch.GetName(), sch.Namespace)
+ case *FixedSchema:
+ return getFullName(sch.GetName(), sch.Namespace)
+ default:
+ return schema.GetName()
+ }
+}
+
+// ParseSchemaFile parses a schema from the given file.
+// May return an error if the schema is not parsable or the file does not exist.
+func ParseSchemaFile(file string) (Schema, error) {
+ fileContents, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+
+ return ParseSchema(string(fileContents))
+}
+
+// ParseSchema parses a given schema without any provided schemas to reuse.
+// Equivalent to calling ParseSchemaWithRegistry(rawSchema, make(map[string]Schema)).
+// May return an error if the schema is not parsable or has insufficient information about any type.
+func ParseSchema(rawSchema string) (Schema, error) {
+ return ParseSchemaWithRegistry(rawSchema, make(map[string]Schema))
+}
+
+// ParseSchemaWithRegistry parses a given schema using the provided registry for type lookup.
+// The registry will be filled in during parsing.
+// May return an error if the schema is not parsable or has insufficient information about any type.
+func ParseSchemaWithRegistry(rawSchema string, schemas map[string]Schema) (Schema, error) {
+ var schema interface{}
+ if err := json.Unmarshal([]byte(rawSchema), &schema); err != nil {
+ schema = rawSchema
+ }
+
+ return schemaByType(schema, schemas, "")
+}
+
+// MustParseSchema is like ParseSchema, but panics if the given schema cannot be parsed.
+func MustParseSchema(rawSchema string) Schema {
+ s, err := ParseSchema(rawSchema)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+func schemaByType(i interface{}, registry map[string]Schema, namespace string) (Schema, error) {
+ switch v := i.(type) {
+ case nil:
+ return new(NullSchema), nil
+ case string:
+ switch v {
+ case typeNull:
+ return new(NullSchema), nil
+ case typeBoolean:
+ return new(BooleanSchema), nil
+ case typeInt:
+ return new(IntSchema), nil
+ case typeLong:
+ return new(LongSchema), nil
+ case typeFloat:
+ return new(FloatSchema), nil
+ case typeDouble:
+ return new(DoubleSchema), nil
+ case typeBytes:
+ return new(BytesSchema), nil
+ case typeString:
+ return new(StringSchema), nil
+ default:
+ // If a name reference contains a dot, we consider it a full name reference.
+ // Otherwise, use the getFullName helper to look up the name.
+ // See https://avro.apache.org/docs/1.7.7/spec.html#Names
+ fullName := v
+ if !strings.ContainsRune(fullName, '.') {
+ fullName = getFullName(v, namespace)
+ }
+ schema, ok := registry[fullName]
+ if !ok {
+ return nil, fmt.Errorf("Unknown type name: %s", v)
+ }
+
+ return schema, nil
+ }
+ case map[string][]interface{}:
+ return parseUnionSchema(v[schemaTypeField], registry, namespace)
+ case map[string]interface{}:
+ switch v[schemaTypeField] {
+ case typeNull:
+ return new(NullSchema), nil
+ case typeBoolean:
+ return new(BooleanSchema), nil
+ case typeInt:
+ return new(IntSchema), nil
+ case typeLong:
+ return new(LongSchema), nil
+ case typeFloat:
+ return new(FloatSchema), nil
+ case typeDouble:
+ return new(DoubleSchema), nil
+ case typeBytes:
+ return new(BytesSchema), nil
+ case typeString:
+ return new(StringSchema), nil
+ case typeArray:
+ items, err := schemaByType(v[schemaItemsField], registry, namespace)
+ if err != nil {
+ return nil, err
+ }
+ return &ArraySchema{Items: items, Properties: getProperties(v)}, nil
+ case typeMap:
+ values, err := schemaByType(v[schemaValuesField], registry, namespace)
+ if err != nil {
+ return nil, err
+ }
+ return &MapSchema{Values: values, Properties: getProperties(v)}, nil
+ case typeEnum:
+ return parseEnumSchema(v, registry, namespace)
+ case typeFixed:
+ return parseFixedSchema(v, registry, namespace)
+ case typeRecord:
+ return parseRecordSchema(v, registry, namespace)
+ default:
+ // Type references can also be done as {"type": "otherType"}.
+ // Just call back in so we can handle this scenario in the string matcher above.
+ return schemaByType(v[schemaTypeField], registry, namespace)
+ }
+ case []interface{}:
+ return parseUnionSchema(v, registry, namespace)
+ }
+
+ return nil, InvalidSchema
+}
+
+func parseEnumSchema(v map[string]interface{}, registry map[string]Schema, namespace string) (Schema, error) {
+ symbols := make([]string, len(v[schemaSymbolsField].([]interface{})))
+ for i, symbol := range v[schemaSymbolsField].([]interface{}) {
+ symbols[i] = symbol.(string)
+ }
+
+ schema := &EnumSchema{Name: v[schemaNameField].(string), Symbols: symbols}
+ setOptionalField(&schema.Namespace, v, schemaNamespaceField)
+ setOptionalField(&schema.Doc, v, schemaDocField)
+ schema.Properties = getProperties(v)
+
+ return addSchema(getFullName(v[schemaNameField].(string), namespace), schema, registry), nil
+}
+
+func parseFixedSchema(v map[string]interface{}, registry map[string]Schema, namespace string) (Schema, error) {
+ size, ok := v[schemaSizeField].(float64)
+ if !ok {
+ return nil, InvalidFixedSize
+ }
+
+ schema := &FixedSchema{Name: v[schemaNameField].(string), Size: int(size), Properties: getProperties(v)}
+ setOptionalField(&schema.Namespace, v, schemaNamespaceField)
+ return addSchema(getFullName(v[schemaNameField].(string), namespace), schema, registry), nil
+}
+
+func parseUnionSchema(v []interface{}, registry map[string]Schema, namespace string) (Schema, error) {
+ types := make([]Schema, len(v))
+ var err error
+ for i := range v {
+ types[i], err = schemaByType(v[i], registry, namespace)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &UnionSchema{Types: types}, nil
+}
+
+func parseRecordSchema(v map[string]interface{}, registry map[string]Schema, namespace string) (Schema, error) {
+ schema := &RecordSchema{Name: v[schemaNameField].(string)}
+ setOptionalField(&schema.Namespace, v, schemaNamespaceField)
+ setOptionalField(&namespace, v, schemaNamespaceField)
+ setOptionalField(&schema.Doc, v, schemaDocField)
+ addSchema(getFullName(v[schemaNameField].(string), namespace), newRecursiveSchema(schema), registry)
+ fields := make([]*SchemaField, len(v[schemaFieldsField].([]interface{})))
+ for i := range fields {
+ field, err := parseSchemaField(v[schemaFieldsField].([]interface{})[i], registry, namespace)
+ if err != nil {
+ return nil, err
+ }
+ fields[i] = field
+ }
+ schema.Fields = fields
+ schema.Properties = getProperties(v)
+
+ return schema, nil
+}
+
+func parseSchemaField(i interface{}, registry map[string]Schema, namespace string) (*SchemaField, error) {
+ switch v := i.(type) {
+ case map[string]interface{}:
+ name, ok := v[schemaNameField].(string)
+ if !ok {
+ return nil, fmt.Errorf("Schema field name missing")
+ }
+ schemaField := &SchemaField{Name: name, Properties: getProperties(v)}
+ setOptionalField(&schemaField.Doc, v, schemaDocField)
+ fieldType, err := schemaByType(v[schemaTypeField], registry, namespace)
+ if err != nil {
+ return nil, err
+ }
+ schemaField.Type = fieldType
+ if def, exists := v[schemaDefaultField]; exists {
+ switch def.(type) {
+ case float64:
+ // JSON treats all numbers as float64 by default
+ switch schemaField.Type.Type() {
+ case Int:
+ var converted = int32(def.(float64))
+ schemaField.Default = converted
+ case Long:
+ var converted = int64(def.(float64))
+ schemaField.Default = converted
+ case Float:
+ var converted = float32(def.(float64))
+ schemaField.Default = converted
+
+ default:
+ schemaField.Default = def
+ }
+ default:
+ schemaField.Default = def
+ }
+ }
+ return schemaField, nil
+ }
+
+ return nil, InvalidSchema
+}
+
+func setOptionalField(where *string, v map[string]interface{}, fieldName string) {
+ if field, exists := v[fieldName]; exists {
+ *where = field.(string)
+ }
+}
+
+func addSchema(name string, schema Schema, schemas map[string]Schema) Schema {
+ if schemas != nil {
+ if sch, ok := schemas[name]; ok {
+ return sch
+ }
+
+ schemas[name] = schema
+ }
+
+ return schema
+}
+
+func getFullName(name string, namespace string) string {
+ if len(namespace) > 0 && !strings.ContainsRune(name, '.') {
+ return namespace + "." + name
+ }
+
+ return name
+}
+
+// getProperties returns the custom (non-reserved) properties of a given schema definition.
+func getProperties(v map[string]interface{}) map[string]interface{} {
+ props := make(map[string]interface{})
+ for name, value := range v {
+ if !isReserved(name) {
+ props[name] = value
+ }
+ }
+ return props
+}
+
+func isReserved(name string) bool {
+ switch name {
+ case schemaAliasesField, schemaDocField, schemaFieldsField, schemaItemsField, schemaNameField,
+ schemaNamespaceField, schemaSizeField, schemaSymbolsField, schemaTypeField, schemaValuesField:
+ return true
+ }
+
+ return false
+}
+
+func dereference(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Ptr {
+ return v.Elem()
+ }
+
+ return v
+}
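
To illustrate how the registry parameter lets one schema refer to a named type defined by another, here is a small sketch. The "ons.Contact" and "ons.Dataset" record definitions are made up for the example.

```go
package main

import (
	"fmt"

	avro "github.com/go-avro/avro"
)

func main() {
	registry := make(map[string]avro.Schema)

	// Parsing the first schema registers the named record "ons.Contact".
	if _, err := avro.ParseSchemaWithRegistry(`{
		"type": "record", "name": "Contact", "namespace": "ons",
		"fields": [{"name": "email", "type": "string"}]
	}`, registry); err != nil {
		panic(err)
	}

	// The second schema can now reference "ons.Contact" purely by name.
	dataset, err := avro.ParseSchemaWithRegistry(`{
		"type": "record", "name": "Dataset", "namespace": "ons",
		"fields": [{"name": "contact", "type": "ons.Contact"}]
	}`, registry)
	if err != nil {
		panic(err)
	}

	fmt.Println(avro.GetFullName(dataset)) // ons.Dataset
}
```
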
diff --git a/vendor/github.com/go-avro/avro/schema_loader.go b/vendor/github.com/go-avro/avro/schema_loader.go
new file mode 100644
index 00000000..19298485
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/schema_loader.go
@@ -0,0 +1,109 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+package avro
+
+import (
+ "io/ioutil"
+ "strings"
+)
+
+const schemaExtension = ".avsc"
+
+// LoadSchemas recursively loads and parses all schema files (*.avsc) under the given directory.
+// Directory names MUST end with "/". On any error an empty map is returned.
+func LoadSchemas(path string) map[string]Schema {
+ files := getFiles(path, make([]string, 0))
+
+ schemas := make(map[string]Schema)
+
+ if files != nil {
+ for _, file := range files {
+ if _, err := loadSchema(path, file, schemas); err != nil {
+ return make(map[string]Schema)
+ }
+ }
+ }
+
+ return schemas
+}
+
+func getFiles(path string, files []string) []string {
+ list, err := ioutil.ReadDir(path)
+ if err != nil {
+ return nil
+ }
+
+ for _, file := range list {
+ if file.IsDir() {
+ files = getFiles(path+file.Name()+"/", files)
+ if files == nil {
+ return nil
+ }
+ } else if file.Mode().IsRegular() {
+ if strings.HasSuffix(file.Name(), schemaExtension) {
+ files = addFile(path+file.Name(), files)
+ }
+ }
+ }
+
+ return files
+}
+
+func addFile(path string, files []string) []string {
+ n := len(files)
+ if n == cap(files) {
+ newFiles := make([]string, len(files), 2*len(files)+1)
+ copy(newFiles, files)
+ files = newFiles
+ }
+
+ files = files[0 : n+1]
+ files[n] = path
+
+ return files
+}
+
+func loadSchema(basePath, avscPath string, schemas map[string]Schema) (Schema, error) {
+ avscJSON, err := ioutil.ReadFile(avscPath)
+ if err != nil {
+ return nil, err
+ }
+
+ var sch Schema
+ for {
+ sch, err = ParseSchemaWithRegistry(string(avscJSON), schemas)
+
+ if err != nil {
+ text := err.Error()
+ if strings.HasPrefix(text, "Undefined schema:") {
+			typ := text[18:]
+ path := basePath + strings.Replace(typ, ".", "/", -1) + schemaExtension
+
+ _, errDep := loadSchema(basePath, path, schemas)
+
+ if errDep != nil {
+ return nil, errDep
+ }
+
+ continue
+ }
+
+ return nil, err
+ }
+
+ return sch, nil
+ }
+}
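
A short usage sketch for the loader. The `./schemas/` directory is hypothetical; note the required trailing "/" and that any error yields an empty map.

```go
package main

import (
	"fmt"

	avro "github.com/go-avro/avro"
)

func main() {
	// Hypothetical directory of *.avsc files; the trailing "/" is required.
	schemas := avro.LoadSchemas("./schemas/")
	for fullName, schema := range schemas {
		fmt.Println(fullName, "->", schema.GetName())
	}
}
```
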
diff --git a/vendor/github.com/go-avro/avro/schema_prepared.go b/vendor/github.com/go-avro/avro/schema_prepared.go
new file mode 100644
index 00000000..0093d168
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/schema_prepared.go
@@ -0,0 +1,141 @@
+package avro
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+/*
+Prepare optimizes a schema for decoding/encoding.
+
+It makes a recursive copy of the schema given and returns an immutable
+wrapper of the schema with some optimizations applied.
+*/
+func Prepare(schema Schema) Schema {
+ job := prepareJob{
+ seen: make(map[Schema]Schema),
+ }
+ return job.prepare(schema)
+}
+
+type prepareJob struct {
+ // the seen struct prevents infinite recursion by caching conversions.
+ seen map[Schema]Schema
+}
+
+func (job *prepareJob) prepare(schema Schema) Schema {
+ output := schema
+ switch schema := schema.(type) {
+ case *RecordSchema:
+ output = job.prepareRecordSchema(schema)
+	case *RecursiveSchema:
+		if seen := job.seen[schema.Actual]; seen != nil {
+			return seen
+		}
+		return job.prepare(schema.Actual)
+ case *UnionSchema:
+ output = job.prepareUnionSchema(schema)
+ case *ArraySchema:
+ output = job.prepareArraySchema(schema)
+ default:
+ return schema
+ }
+ job.seen[schema] = output
+ return output
+}
+
+func (job *prepareJob) prepareUnionSchema(input *UnionSchema) Schema {
+ output := &UnionSchema{
+ Types: make([]Schema, len(input.Types)),
+ }
+ for i, t := range input.Types {
+ output.Types[i] = job.prepare(t)
+ }
+ return output
+}
+
+func (job *prepareJob) prepareArraySchema(input *ArraySchema) Schema {
+ return &ArraySchema{
+ Properties: input.Properties,
+ Items: job.prepare(input.Items),
+ }
+}
+func (job *prepareJob) prepareMapSchema(input *MapSchema) Schema {
+ return &MapSchema{
+ Properties: input.Properties,
+ Values: job.prepare(input.Values),
+ }
+}
+
+func (job *prepareJob) prepareRecordSchema(input *RecordSchema) *preparedRecordSchema {
+ output := &preparedRecordSchema{
+ RecordSchema: *input,
+ pool: sync.Pool{New: func() interface{} { return make(map[reflect.Type]*recordPlan) }},
+ }
+ output.Fields = nil
+ for _, field := range input.Fields {
+ output.Fields = append(output.Fields, &SchemaField{
+ Name: field.Name,
+ Doc: field.Doc,
+ Default: field.Default,
+ Type: job.prepare(field.Type),
+ })
+ }
+ return output
+}
+
+type preparedRecordSchema struct {
+ RecordSchema
+ pool sync.Pool
+}
+
+func (rs *preparedRecordSchema) getPlan(t reflect.Type) (plan *recordPlan, err error) {
+ cache := rs.pool.Get().(map[reflect.Type]*recordPlan)
+ if plan = cache[t]; plan != nil {
+ rs.pool.Put(cache)
+ return
+ }
+
+ // Use the reflectmap to get field info.
+ ri := reflectEnsureRi(t)
+
+ decodePlan := make([]structFieldPlan, len(rs.Fields))
+ for i, schemafield := range rs.Fields {
+		index, ok := ri.names[schemafield.Name]
+		if !ok {
+			// Return the pooled cache and fail fast rather than caching a plan with a missing field.
+			rs.pool.Put(cache)
+			return nil, fmt.Errorf("Type %v does not have field %s required for decoding schema", t, schemafield.Name)
+		}
+ entry := &decodePlan[i]
+ entry.schema = schemafield.Type
+ entry.name = schemafield.Name
+ entry.index = index
+ entry.dec = specificDecoder(entry)
+ }
+
+ plan = &recordPlan{
+ // Over time, we will create decode/encode plans for more things.
+ decodePlan: decodePlan,
+ }
+ cache[t] = plan
+ rs.pool.Put(cache)
+ return
+}
+
+// sdr is a shared sDatumReader used by the prepared field decoders in this package.
+var sdr sDatumReader
+
+type recordPlan struct {
+ decodePlan []structFieldPlan
+}
+
+// assertRecordSchema returns the underlying *RecordSchema of either a plain or a prepared record schema.
+// Until more optimizations are implemented, many code paths still want a *RecordSchema, and this helper makes that easier.
+func assertRecordSchema(s Schema) *RecordSchema {
+ rs, ok := s.(*RecordSchema)
+ if !ok {
+ rs = &s.(*preparedRecordSchema).RecordSchema
+ }
+ return rs
+}
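
A minimal sketch of wrapping a parsed schema with Prepare; the schema itself is illustrative. The prepared wrapper still satisfies the Schema interface, so it can be passed anywhere a Schema is expected.

```go
package main

import (
	"fmt"

	avro "github.com/go-avro/avro"
)

func main() {
	schema := avro.MustParseSchema(`{
		"type": "record", "name": "Event",
		"fields": [{"name": "id", "type": "long"}]
	}`)

	// Prepare returns an immutable, recursively copied wrapper that caches
	// per-struct-type decode plans when records are decoded into Go structs.
	prepared := avro.Prepare(schema)
	fmt.Println(prepared.GetName()) // Event
}
```
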
diff --git a/vendor/github.com/go-avro/avro/schema_prepared_specific.go b/vendor/github.com/go-avro/avro/schema_prepared_specific.go
new file mode 100644
index 00000000..7e275cb9
--- /dev/null
+++ b/vendor/github.com/go-avro/avro/schema_prepared_specific.go
@@ -0,0 +1,53 @@
+package avro
+
+import "reflect"
+
+func specificDecoder(entry *structFieldPlan) preparedDecoder {
+ switch entry.schema.Type() {
+ case Record:
+ return recordDec(entry.schema)
+ case Enum:
+ return enumDec(entry.schema.(*EnumSchema))
+ default:
+ // Generic decoders get less drastic speedups, but we can add more later.
+ return genericDec(entry.schema)
+ }
+}
+
+// structFieldPlan is a plan that assists in decoding
+type structFieldPlan struct {
+ name string
+ index []int
+ schema Schema
+ dec preparedDecoder
+}
+
+type preparedDecoder func(reflectField reflect.Value, dec Decoder) (reflect.Value, error)
+
+func genericDec(schema Schema) preparedDecoder {
+ return func(reflectField reflect.Value, dec Decoder) (reflect.Value, error) {
+ return sdr.readValue(schema, reflectField, dec)
+ }
+}
+
+func enumDec(schema *EnumSchema) preparedDecoder {
+ symbolsToIndex := NewGenericEnum(schema.Symbols).symbolsToIndex
+ return func(reflectField reflect.Value, dec Decoder) (reflect.Value, error) {
+ enumIndex, err := dec.ReadEnum()
+ if err != nil {
+ return reflect.ValueOf(enumIndex), err
+ }
+ enum := &GenericEnum{
+ Symbols: schema.Symbols,
+ symbolsToIndex: symbolsToIndex,
+ index: enumIndex,
+ }
+ return reflect.ValueOf(enum), nil
+ }
+}
+
+func recordDec(schema Schema) preparedDecoder {
+ return func(reflectField reflect.Value, dec Decoder) (reflect.Value, error) {
+ return sdr.mapRecord(schema, reflectField, dec)
+ }
+}
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 00000000..bcfa1952
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 00000000..931ae316
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 00000000..6050c10f
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 00000000..cea12879
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8 2.19GB/s ± 0% html
+_UFlat1-8 1.41GB/s ± 0% urls
+_UFlat2-8 23.5GB/s ± 2% jpg
+_UFlat3-8 1.91GB/s ± 0% jpg_200
+_UFlat4-8 14.0GB/s ± 1% pdf
+_UFlat5-8 1.97GB/s ± 0% html4
+_UFlat6-8 814MB/s ± 0% txt1
+_UFlat7-8 785MB/s ± 0% txt2
+_UFlat8-8 857MB/s ± 0% txt3
+_UFlat9-8 719MB/s ± 1% txt4
+_UFlat10-8 2.84GB/s ± 0% pb
+_UFlat11-8 1.05GB/s ± 0% gaviota
+
+_ZFlat0-8 1.04GB/s ± 0% html
+_ZFlat1-8 534MB/s ± 0% urls
+_ZFlat2-8 15.7GB/s ± 1% jpg
+_ZFlat3-8 740MB/s ± 3% jpg_200
+_ZFlat4-8 9.20GB/s ± 1% pdf
+_ZFlat5-8 991MB/s ± 0% html4
+_ZFlat6-8 379MB/s ± 0% txt1
+_ZFlat7-8 352MB/s ± 0% txt2
+_ZFlat8-8 396MB/s ± 1% txt3
+_ZFlat9-8 327MB/s ± 1% txt4
+_ZFlat10-8 1.33GB/s ± 1% pb
+_ZFlat11-8 605MB/s ± 1% gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8 621MB/s ± 2% html
+_UFlat1-8 494MB/s ± 1% urls
+_UFlat2-8 23.2GB/s ± 1% jpg
+_UFlat3-8 1.12GB/s ± 1% jpg_200
+_UFlat4-8 4.35GB/s ± 1% pdf
+_UFlat5-8 609MB/s ± 0% html4
+_UFlat6-8 296MB/s ± 0% txt1
+_UFlat7-8 288MB/s ± 0% txt2
+_UFlat8-8 309MB/s ± 1% txt3
+_UFlat9-8 280MB/s ± 1% txt4
+_UFlat10-8 753MB/s ± 0% pb
+_UFlat11-8 400MB/s ± 0% gaviota
+
+_ZFlat0-8 409MB/s ± 1% html
+_ZFlat1-8 250MB/s ± 1% urls
+_ZFlat2-8 12.3GB/s ± 1% jpg
+_ZFlat3-8 132MB/s ± 0% jpg_200
+_ZFlat4-8 2.92GB/s ± 0% pdf
+_ZFlat5-8 405MB/s ± 1% html4
+_ZFlat6-8 179MB/s ± 1% txt1
+_ZFlat7-8 170MB/s ± 1% txt2
+_ZFlat8-8 189MB/s ± 1% txt3
+_ZFlat9-8 164MB/s ± 1% txt4
+_ZFlat10-8 479MB/s ± 1% pb
+_ZFlat11-8 270MB/s ± 1% gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0 2.4GB/s html
+BM_UFlat/1 1.4GB/s urls
+BM_UFlat/2 21.8GB/s jpg
+BM_UFlat/3 1.5GB/s jpg_200
+BM_UFlat/4 13.3GB/s pdf
+BM_UFlat/5 2.1GB/s html4
+BM_UFlat/6 1.0GB/s txt1
+BM_UFlat/7 959.4MB/s txt2
+BM_UFlat/8 1.0GB/s txt3
+BM_UFlat/9 864.5MB/s txt4
+BM_UFlat/10 2.9GB/s pb
+BM_UFlat/11 1.2GB/s gaviota
+
+BM_ZFlat/0 944.3MB/s html (22.31 %)
+BM_ZFlat/1 501.6MB/s urls (47.78 %)
+BM_ZFlat/2 14.3GB/s jpg (99.95 %)
+BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
+BM_ZFlat/4 8.3GB/s pdf (83.30 %)
+BM_ZFlat/5 903.5MB/s html4 (22.52 %)
+BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
+BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
+BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
+BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
+BM_ZFlat/10 1.2GB/s pb (19.68 %)
+BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
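
The benchmarks above exercise the package's block format. As a quick orientation before the vendored sources below, here is a minimal round trip through that API; this example is illustrative only and uses the import path as vendored in this repo.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("dataset "), 16)

	// Block format: Encode prepends the uvarint uncompressed length,
	// then emits literal/copy tags (see encode.go below).
	encoded := snappy.Encode(nil, src)

	decoded, err := snappy.Decode(nil, encoded)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round-trips: %v\n",
		len(src), len(encoded), bytes.Equal(src, decoded))
}
```
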
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 00000000..72efb035
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+ decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= len(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ switch decode(dst, src[s:]) {
+ case 0:
+ return dst, nil
+ case decodeErrCodeUnsupportedLiteralLength:
+ return nil, errUnsupportedLiteralLength
+ }
+ return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxBlockSize),
+ buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return 0, r.err
+ }
+ }
+}
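
`Reader` implements the framed/streaming format and is the counterpart of the `Writer` defined in encode.go further down. A short usage sketch of the pair (not part of the vendored file):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// Writer side of the framing format (see encode.go below).
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write([]byte("a framed snappy stream")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close flushes the final chunk.
		panic(err)
	}

	// Reader side: checks the stream identifier chunk, then decompresses
	// data chunks and verifies each chunk's CRC, as in Read above.
	r := snappy.NewReader(&buf)
	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```
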
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 00000000..fcd192b8
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 00000000..e6179f65
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - AX scratch
+// - BX scratch
+// - CX length or x
+// - DX offset
+// - SI &src[s]
+// - DI &dst[d]
+// + R8 dst_base
+// + R9 dst_len
+// + R10 dst_base + dst_len
+// + R11 src_base
+// + R12 src_len
+// + R13 src_base + src_len
+// - R14 used by doCopy
+// - R15 used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+ // Initialize SI, DI and R8-R13.
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, DI
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, SI
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+loop:
+ // for s < len(src)
+ CMPQ SI, R13
+ JEQ end
+
+ // CX = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (SI), CX
+ MOVL CX, BX
+ ANDL $3, BX
+ CMPL BX, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, CX
+ CMPL CX, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ SI
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that CX == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // CX can hold 64 bits, so the increment cannot overflow.
+ INCQ CX
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // AX = len(dst) - d
+ // BX = len(src) - s
+ MOVQ R10, AX
+ SUBQ DI, AX
+ MOVQ R13, BX
+ SUBQ SI, BX
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ CX, $16
+ JGT callMemmove
+ CMPQ AX, $16
+ JLT callMemmove
+ CMPQ BX, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(SI), X0
+ MOVOU X0, 0(DI)
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ CX, AX
+ JGT errCorrupt
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // DI, SI and CX as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R8-R13.
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ CX, SI
+ SUBQ $58, SI
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL CX, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(SI), CX
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(SI), CX
+ JMP doLit
+
+tagLit62Plus:
+ CMPL CX, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVWLZX -3(SI), CX
+ MOVBLZX -1(SI), BX
+ SHLL $16, BX
+ ORL BX, CX
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(SI), CX
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(SI), DX
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(SI), DX
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ //   abxxxxxxxxxxxx
+ //   [------]           d-offset
+ //     [------]         d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ //   ababxxxxxxxxxx
+ //   [------]           d-offset
+ //       [------]       d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), BX
+ MOVB BX, (DI)
+ INCQ R15
+ INCQ DI
+ DECQ CX
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ DI, R10
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
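
The slow and very-slow forward-copy paths above implement the same semantics as the Go fallback in decode_other.go that follows: a copy that always runs forwards, so overlapping ranges repeat a pattern. A tiny sketch of that behaviour (illustration only, not the vendored code):

```go
package main

import "fmt"

// forwardCopy mirrors the decoder's copy-tag loop: unlike the built-in copy,
// it always runs forwards, so an overlapping source repeats the pattern.
func forwardCopy(dst []byte, d, offset, length int) {
	for end := d + length; d != end; d++ {
		dst[d] = dst[d-offset]
	}
}

func main() {
	dst := make([]byte, 8)
	copy(dst, "ab")           // bytes already decoded
	forwardCopy(dst, 2, 2, 6) // copy tag: offset 2, length 6
	fmt.Println(string(dst))  // abababab
}
```
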
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 00000000..8c9f2049
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+ var d, s, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ return decodeErrCodeUnsupportedLiteralLength
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ return decodeErrCodeCorrupt
+ }
+ // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+ // the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ for end := d + length; d != end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
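
As a worked example of the tag switch above, here is a hand-built block containing a single literal, decoded through the public API (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// A hand-assembled snappy block for "hello":
	//   0x05            uvarint: decoded length is 5
	//   0x10            tagLiteral with x = 4, i.e. (5-1)<<2 | 0x00
	//   'h' .. 'o'      the five literal bytes
	block := []byte{0x05, 0x10, 'h', 'e', 'l', 'l', 'o'}

	out, err := snappy.Decode(nil, block)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello
}
```
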
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 00000000..8d393e90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
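
`MaxEncodedLen` is what callers use to pre-size `dst` so `Encode` can return a sub-slice instead of allocating. A short sketch of that pattern (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("pre-sizing dst avoids an allocation on every Encode call")

	// The worst-case bound is roughly 32 + n + n/6, per the comment in
	// MaxEncodedLen above, so this buffer is always big enough.
	dst := make([]byte, snappy.MaxEncodedLen(len(src)))

	encoded := snappy.Encode(dst, src) // sub-slice of dst, no new allocation
	fmt.Printf("buffer %d bytes, block %d bytes\n", len(dst), len(encoded))
}
```
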
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 00000000..150d91bc
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 00000000..adfd979f
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX len(lit)
+// - BX n
+// - DX return value
+// - DI &dst[i]
+// - R10 &lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ lit_base+24(FP), R10
+ MOVQ lit_len+32(FP), AX
+ MOVQ AX, DX
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT oneByte
+ CMPL BX, $256
+ JLT twoBytes
+
+threeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ ADDQ $3, DX
+ JMP memmove
+
+twoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ ADDQ $2, DX
+ JMP memmove
+
+oneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+ ADDQ $1, DX
+
+memmove:
+ MOVQ DX, ret+48(FP)
+
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ CALL runtime·memmove(SB)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX length
+// - SI &dst[0]
+// - DI &dst[i]
+// - R11 offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, SI
+ MOVQ offset+24(FP), R11
+ MOVQ length+32(FP), AX
+
+loop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT step1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP loop0
+
+step1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE step2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+step2:
+ // if length >= 12 || offset >= 2048 { goto step3 }
+ CMPL AX, $12
+ JGE step3
+ CMPL R11, $2048
+ JGE step3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+step3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+// - DX &src[0]
+// - SI &src[j]
+// - R13 &src[len(src) - 8]
+// - R14 &src[len(src)]
+// - R15 &src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+ MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), R14
+ MOVQ i+24(FP), R15
+ MOVQ j+32(FP), SI
+ ADDQ DX, R14
+ ADDQ DX, R15
+ ADDQ DX, SI
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA cmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE extendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE extendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - AX . .
+// - BX . .
+// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
+// - DX 64 &src[0], tableSize
+// - SI 72 &src[s]
+// - DI 80 &dst[d]
+// - R9 88 sLimit
+// - R10 . &src[nextEmit]
+// - R11 96 prevHash, currHash, nextHash, offset
+// - R12 104 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 112 candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVQ $24, CX
+ MOVQ $256, DX
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ // shift--
+ // }
+ CMPQ DX, $16384
+ JGE varTable
+ CMPQ DX, R14
+ JGE varTable
+ SUBQ $1, CX
+ SHLQ $1, DX
+ JMP calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+ // writes 16 bytes, so we can do only tableSize/8 writes instead of the
+ // 2048 writes that would zero-initialize all of table's 32768 bytes.
+ SHRQ $3, DX
+ LEAQ table-32768(SP), BX
+ PXOR X0, X0
+
+memclr:
+ MOVOU X0, 0(BX)
+ ADDQ $16, BX
+ SUBQ $1, DX
+ JNZ memclr
+
+ // !!! DX = &src[0]
+ MOVQ SI, DX
+
+ // sLimit := len(src) - inputMargin
+ MOVQ R14, R9
+ SUBQ $15, R9
+
+ // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVQ CX, 56(SP)
+ MOVQ DX, 64(SP)
+ MOVQ R9, 88(SP)
+
+ // nextEmit := 0
+ MOVQ DX, R10
+
+ // s := 1
+ ADDQ $1, SI
+
+ // nextHash := hash(load32(src, s), shift)
+ MOVL 0(SI), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+outer:
+ // for { etc }
+
+ // skip := 32
+ MOVQ $32, R12
+
+ // nextS := s
+ MOVQ SI, R13
+
+ // candidate := 0
+ MOVQ $0, R15
+
+inner0:
+ // for { etc }
+
+ // s := nextS
+ MOVQ R13, SI
+
+ // bytesBetweenHashLookups := skip >> 5
+ MOVQ R12, R14
+ SHRQ $5, R14
+
+ // nextS = s + bytesBetweenHashLookups
+ ADDQ R14, R13
+
+ // skip += bytesBetweenHashLookups
+ ADDQ R14, R12
+
+ // if nextS > sLimit { goto emitRemainder }
+ MOVQ R13, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JA emitRemainder
+
+ // candidate = int(table[nextHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[nextHash] = uint16(s)
+ MOVQ SI, AX
+ SUBQ DX, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // nextHash = hash(load32(src, nextS), shift)
+ MOVL 0(R13), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // if load32(src, s) != load32(src, candidate) { continue } break
+ MOVL 0(SI), AX
+ MOVL (DX)(R15*1), BX
+ CMPL AX, BX
+ JNE inner0
+
+fourByteMatch:
+ // As per the encode_other.go code:
+ //
+ // A 4-byte match has been found. We'll later see etc.
+
+ // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+ // on inputMargin in encode.go.
+ MOVQ SI, AX
+ SUBQ R10, AX
+ CMPQ AX, $16
+ JLE emitLiteralFastPath
+
+ // ----------------------------------------
+ // Begin inline of the emitLiteral call.
+ //
+ // d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT inlineEmitLiteralOneByte
+ CMPL BX, $256
+ JLT inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+ // Spill local variables (registers) onto the stack; call; unspill.
+ //
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVQ SI, 72(SP)
+ MOVQ DI, 80(SP)
+ MOVQ R15, 112(SP)
+ CALL runtime·memmove(SB)
+ MOVQ 56(SP), CX
+ MOVQ 64(SP), DX
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ MOVQ 88(SP), R9
+ MOVQ 112(SP), R15
+ JMP inner1
+
+inlineEmitLiteralEnd:
+ // End inline of the emitLiteral call.
+ // ----------------------------------------
+
+emitLiteralFastPath:
+ // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+ MOVB AX, BX
+ SUBB $1, BX
+ SHLB $2, BX
+ MOVB BX, (DI)
+ ADDQ $1, DI
+
+ // !!! Implement the copy from lit to dst as a 16-byte load and store.
+ // (Encode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only len(lit) bytes, but that's
+ // OK. Subsequent iterations will fix up the overrun.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R10), X0
+ MOVOU X0, 0(DI)
+ ADDQ AX, DI
+
+inner1:
+ // for { etc }
+
+ // base := s
+ MOVQ SI, R12
+
+ // !!! offset := base - candidate
+ MOVQ R12, R11
+ SUBQ R15, R11
+ SUBQ DX, R11
+
+ // ----------------------------------------
+ // Begin inline of the extendMatch call.
+ //
+ // s = extendMatch(src, candidate+4, s+4)
+
+ // !!! R14 = &src[len(src)]
+ MOVQ src_len+32(FP), R14
+ ADDQ DX, R14
+
+ // !!! R13 = &src[len(src) - 8]
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+ // !!! R15 = &src[candidate + 4]
+ ADDQ $4, R15
+ ADDQ DX, R15
+
+ // !!! s += 4
+ ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA inlineExtendMatchCmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE inlineExtendMatchBSF
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+ JMP inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE inlineExtendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE inlineExtendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+ // End inline of the extendMatch call.
+ // ----------------------------------------
+
+ // ----------------------------------------
+ // Begin inline of the emitCopy call.
+ //
+ // d += emitCopy(dst[d:], base-candidate, s-base)
+
+ // !!! length := s - base
+ MOVQ SI, AX
+ SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT inlineEmitCopyStep1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE inlineEmitCopyStep2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+inlineEmitCopyStep2:
+ // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+ CMPL AX, $12
+ JGE inlineEmitCopyStep3
+ CMPL R11, $2048
+ JGE inlineEmitCopyStep3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+ JMP inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+inlineEmitCopyEnd:
+ // End inline of the emitCopy call.
+ // ----------------------------------------
+
+ // nextEmit = s
+ MOVQ SI, R10
+
+ // if s >= sLimit { goto emitRemainder }
+ MOVQ SI, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JAE emitRemainder
+
+ // As per the encode_other.go code:
+ //
+ // We could immediately etc.
+
+ // x := load64(src, s-1)
+ MOVQ -1(SI), R14
+
+ // prevHash := hash(uint32(x>>0), shift)
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // table[prevHash] = uint16(s-1)
+ MOVQ SI, AX
+ SUBQ DX, AX
+ SUBQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // currHash := hash(uint32(x>>8), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // candidate = int(table[currHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[currHash] = uint16(s)
+ ADDQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // if uint32(x>>8) == load32(src, candidate) { continue }
+ MOVL (DX)(R15*1), BX
+ CMPL R14, BX
+ JEQ inner1
+
+ // nextHash = hash(uint32(x>>16), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // s++
+ ADDQ $1, SI
+
+ // break out of the inner1 for loop, i.e. continue the outer loop.
+ JMP outer
+
+emitRemainder:
+ // if nextEmit < len(src) { etc }
+ MOVQ src_len+32(FP), AX
+ ADDQ DX, AX
+ CMPQ R10, AX
+ JEQ encodeBlockEnd
+
+ // d += emitLiteral(dst[d:], src[nextEmit:])
+ //
+ // Push args.
+ MOVQ DI, 0(SP)
+ MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ R10, 24(SP)
+ SUBQ R10, AX
+ MOVQ AX, 32(SP)
+ MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+ // Spill local variables (registers) onto the stack; call; unspill.
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
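
The oneByte/twoBytes/threeBytes branches of the `emitLiteral` assembly above produce the literal headers that the Go fallback in encode_other.go (below) spells out. A small sketch of just that header encoding, as an illustration rather than the vendored function:

```go
package main

import "fmt"

// literalHeader mirrors emitLiteral's tag encoding: literal lengths up to 60
// fit in one header byte, up to 256 in two, and anything larger in three.
func literalHeader(litLen int) []byte {
	n := litLen - 1
	const tagLiteral = 0x00
	switch {
	case n < 60:
		return []byte{byte(n)<<2 | tagLiteral}
	case n < 1<<8:
		return []byte{60<<2 | tagLiteral, byte(n)}
	default:
		return []byte{61<<2 | tagLiteral, byte(n), byte(n >> 8)}
	}
}

func main() {
	fmt.Printf("% x\n", literalHeader(5))   // 10
	fmt.Printf("% x\n", literalHeader(100)) // f0 63
	fmt.Printf("% x\n", literalHeader(300)) // f4 2b 01
}
```
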
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 00000000..dbcae905
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+// 0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+}
+
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxBlockSize and maxBlockSize == 65536.
+ const (
+ maxTableSize = 1 << 14
+ // tableMask is redundant, but helps the compiler eliminate bounds
+ // checks.
+ tableMask = maxTableSize - 1
+ )
+ shift := uint32(32 - 8)
+ for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ shift--
+ }
+ // In Go, all array elements are zero-initialized, so there is no advantage
+ // to a smaller tableSize per se. However, it matches the C++ algorithm,
+ // and in the asm versions of this code, we can get away with zeroing only
+ // the first tableSize elements.
+ var table [maxTableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s), shift)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS), shift)
+ if load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+ }
+
+ d += emitCopy(dst[d:], base-candidate, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x>>0), shift)
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x>>8), shift)
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x>>16), shift)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
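
For orientation, the pure-Go `encodeBlock` above backs the package's exported `Encode`/`Decode` helpers, which are defined elsewhere in the vendored package and not in this hunk. A minimal round-trip sketch, assuming the standard golang/snappy block API:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("dp-dataset-api "), 100)

	// Encode allocates a destination (nil lets the package size it) and
	// returns the compressed block produced by encodeBlock under the hood.
	compressed := snappy.Encode(nil, src)

	// Decode reverses the transform; err is non-nil for corrupt input.
	decompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(src), "->", len(compressed), "roundtrip:", bytes.Equal(src, decompressed))
}
```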
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 00000000..0cf5e379
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the snappy block-based compression format.
+// It aims for very high speeds and reasonable compression.
+//
+// The C++ snappy implementation is at https://github.com/google/snappy
+package snappy // import "github.com/golang/snappy"
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
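
The chunk-tag layout described in the comment block above can be checked with a short sketch; the constants mirror the ones declared in this file and the numbers are illustrative only:

```go
package main

import "fmt"

func main() {
	const (
		tagLiteral = 0x00
		tagCopy1   = 0x01
		tagCopy2   = 0x02
		tagCopy4   = 0x03
	)
	// A length-60 tagCopy2 header byte, as emitCopy would produce it.
	b := byte(59<<2 | tagCopy2)
	l, m := b&0x03, b>>2 // low 2 bits = tag (l), high 6 bits = m
	fmt.Printf("tag=%d m=%d (length=%d for a copy-2 chunk)\n", l, m, m+1)
}
```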
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
index 67a79e00..85953973 100644
--- a/vendor/github.com/gorilla/mux/README.md
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -135,14 +135,6 @@ r.HandleFunc("/products", ProductsHandler).
Schemes("http")
```
-Routes are tested in the order they were added to the router. If two routes match, the first one wins:
-
-```go
-r := mux.NewRouter()
-r.HandleFunc("/specific", specificHandler)
-r.PathPrefix("/").Handler(catchAllHandler)
-```
-
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
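
The README's own example falls outside this hunk; a minimal sketch of the pattern, assuming the `Host(...).Subrouter()` API this vendored version exposes (the host and handler are placeholders):

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func handler(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("ok"))
}

func main() {
	r := mux.NewRouter()

	// Only requests whose Host header is www.example.com reach this subrouter.
	s := r.Host("www.example.com").Subrouter()
	s.HandleFunc("/products/", handler)
	s.HandleFunc("/articles/{category}/{id:[0-9]+}", handler)

	log.Fatal(http.ListenAndServe(":8080", r))
}
```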
@@ -201,34 +193,22 @@ func main() {
r.HandleFunc("/products", handler).Methods("POST")
r.HandleFunc("/articles", handler).Methods("GET")
r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
- r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
t, err := route.GetPathTemplate()
if err != nil {
return err
}
- qt, err := route.GetQueriesTemplates()
- if err != nil {
- return err
- }
// p will contain regular expression is compatible with regular expression in Perl, Python, and other languages.
// for instance the regular expression for path '/articles/{id}' will be '^/articles/(?P[^/]+)$'
p, err := route.GetPathRegexp()
if err != nil {
return err
}
- // qr will contain a list of regular expressions with the same semantics as GetPathRegexp,
- // just applied to the Queries pairs instead, e.g., 'Queries("surname", "{surname}") will return
- // {"^surname=(?P.*)$}. Where each combined query pair will have an entry in the list.
- qr, err := route.GetQueriesRegexp()
- if err != nil {
- return err
- }
m, err := route.GetMethods()
if err != nil {
return err
}
- fmt.Println(strings.Join(m, ","), strings.Join(qt, ","), strings.Join(qr, ","), t, p)
+ fmt.Println(strings.Join(m, ","), t, p)
return nil
})
http.Handle("/", r)
@@ -294,7 +274,7 @@ This also works for host and query value variables:
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
- Queries("filter", "{filter}").
+ Queries("filter", "{filter}")
HandlerFunc(ArticleHandler).
Name("article")
@@ -351,34 +331,22 @@ r.HandleFunc("/", handler)
r.HandleFunc("/products", handler).Methods("POST")
r.HandleFunc("/articles", handler).Methods("GET")
r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
-r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
t, err := route.GetPathTemplate()
if err != nil {
return err
}
- qt, err := route.GetQueriesTemplates()
- if err != nil {
- return err
- }
// p will contain a regular expression that is compatible with regular expressions in Perl, Python, and other languages.
// For example, the regular expression for path '/articles/{id}' will be '^/articles/(?P[^/]+)$'.
p, err := route.GetPathRegexp()
if err != nil {
return err
}
- // qr will contain a list of regular expressions with the same semantics as GetPathRegexp,
- // just applied to the Queries pairs instead, e.g., 'Queries("surname", "{surname}") will return
- // {"^surname=(?P.*)$}. Where each combined query pair will have an entry in the list.
- qr, err := route.GetQueriesRegexp()
- if err != nil {
- return err
- }
m, err := route.GetMethods()
if err != nil {
return err
}
- fmt.Println(strings.Join(m, ","), strings.Join(qt, ","), strings.Join(qr, ","), t, p)
+ fmt.Println(strings.Join(m, ","), t, p)
return nil
})
```
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
index 9aec0fac..aa195979 100644
--- a/vendor/github.com/gorilla/mux/mux.go
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -10,11 +10,7 @@ import (
"net/http"
"path"
"regexp"
-)
-
-var (
- ErrMethodMismatch = errors.New("method is not allowed")
- ErrNotFound = errors.New("no matching route was found")
+ "strings"
)
// NewRouter returns a new router instance.
@@ -43,10 +39,6 @@ func NewRouter() *Router {
type Router struct {
// Configurable Handler to be used when no route matches.
NotFoundHandler http.Handler
-
- // Configurable Handler to be used when the request method does not match the route.
- MethodNotAllowedHandler http.Handler
-
// Parent route, if this is a subrouter.
parent parentRoute
// Routes to be matched, in order.
@@ -65,17 +57,7 @@ type Router struct {
useEncodedPath bool
}
-// Match attempts to match the given request against the router's registered routes.
-//
-// If the request matches a route of this router or one of its subrouters the Route,
-// Handler, and Vars fields of the the match argument are filled and this function
-// returns true.
-//
-// If the request does not match any of this router's or its subrouters' routes
-// then this function returns false. If available, a reason for the match failure
-// will be filled in the match argument's MatchErr field. If the match failure type
-// (eg: not found) has a registered handler, the handler is assigned to the Handler
-// field of the match argument.
+// Match matches registered routes against the request.
func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
for _, route := range r.routes {
if route.Match(req, match) {
@@ -83,23 +65,11 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
}
}
- if match.MatchErr == ErrMethodMismatch {
- if r.MethodNotAllowedHandler != nil {
- match.Handler = r.MethodNotAllowedHandler
- return true
- } else {
- return false
- }
- }
-
// Closest match for a router (includes sub-routers)
if r.NotFoundHandler != nil {
match.Handler = r.NotFoundHandler
- match.MatchErr = ErrNotFound
return true
}
-
- match.MatchErr = ErrNotFound
return false
}
@@ -111,7 +81,7 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if !r.skipClean {
path := req.URL.Path
if r.useEncodedPath {
- path = req.URL.EscapedPath()
+ path = getPath(req)
}
// Clean path to canonical form and redirect.
if p := cleanPath(path); p != path {
@@ -135,15 +105,9 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
req = setVars(req, match.Vars)
req = setCurrentRoute(req, match.Route)
}
-
- if handler == nil && match.MatchErr == ErrMethodMismatch {
- handler = methodNotAllowedHandler()
- }
-
if handler == nil {
handler = http.NotFoundHandler()
}
-
if !r.KeepContext {
defer contextClear(req)
}
@@ -196,6 +160,10 @@ func (r *Router) SkipClean(value bool) *Router {
// UseEncodedPath tells the router to match the encoded original path
// to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+// This behavior has the drawback of needing to match routes against
+// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
+// to r.URL.Path will not affect routing when this flag is on and thus may
+// induce unintended behavior.
//
// If not called, the router will match the unencoded path to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
@@ -376,11 +344,6 @@ type RouteMatch struct {
Route *Route
Handler http.Handler
Vars map[string]string
-
- // MatchErr is set to appropriate matching error
- // It is set to ErrMethodMismatch if there is a mismatch in
- // the request method and route method
- MatchErr error
}
type contextKey int
@@ -422,6 +385,28 @@ func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
// Helpers
// ----------------------------------------------------------------------------
+// getPath returns the escaped path if possible, doing what URL.EscapedPath()
+// (added in go1.5) does
+func getPath(req *http.Request) string {
+ if req.RequestURI != "" {
+ // Extract the path from RequestURI (which is escaped unlike URL.Path)
+ // as detailed in https://golang.org/pkg/net/url/#URL
+ // for < 1.5 server side workaround
+ // http://localhost/path/here?v=1 -> /path/here
+ path := req.RequestURI
+ path = strings.TrimPrefix(path, req.URL.Scheme+`://`)
+ path = strings.TrimPrefix(path, req.URL.Host)
+ if i := strings.LastIndex(path, "?"); i > -1 {
+ path = path[:i]
+ }
+ if i := strings.LastIndex(path, "#"); i > -1 {
+ path = path[:i]
+ }
+ return path
+ }
+ return req.URL.Path
+}
+
// cleanPath returns the canonical path for p, eliminating . and .. elements.
// Borrowed from the net/http package.
func cleanPath(p string) string {
@@ -560,12 +545,3 @@ func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]s
}
return true
}
-
-// methodNotAllowed replies to the request with an HTTP status code 405.
-func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusMethodNotAllowed)
-}
-
-// methodNotAllowedHandler returns a simple request handler
-// that replies to each request with a status code 405.
-func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
index e83213b7..80d1f785 100644
--- a/vendor/github.com/gorilla/mux/regexp.go
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -141,7 +141,7 @@ type routeRegexp struct {
matchQuery bool
// The strictSlash value defined on the route, but disabled if PathPrefix was used.
strictSlash bool
- // Determines whether to use encoded req.URL.EnscapedPath() or unencoded
+ // Determines whether to use encoded path from getPath function or unencoded
// req.URL.Path for path matching
useEncodedPath bool
// Expanded regexp.
@@ -162,7 +162,7 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
}
path := req.URL.Path
if r.useEncodedPath {
- path = req.URL.EscapedPath()
+ path = getPath(req)
}
return r.regexp.MatchString(path)
}
@@ -272,7 +272,7 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
}
path := req.URL.Path
if r.useEncodedPath {
- path = req.URL.EscapedPath()
+ path = getPath(req)
}
// Store path variables.
if v.path != nil {
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
index 69aeae79..6d4a07a2 100644
--- a/vendor/github.com/gorilla/mux/route.go
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -52,31 +52,12 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if r.buildOnly || r.err != nil {
return false
}
-
- var matchErr error
-
// Match everything.
for _, m := range r.matchers {
if matched := m.Match(req, match); !matched {
- if _, ok := m.(methodMatcher); ok {
- matchErr = ErrMethodMismatch
- continue
- }
- matchErr = nil
return false
}
}
-
- if matchErr != nil {
- match.MatchErr = matchErr
- return false
- }
-
- if match.MatchErr == ErrMethodMismatch {
- // We found a route which matches request method, clear MatchErr
- match.MatchErr = nil
- }
-
// Yay, we have a match. Let's collect some info about it.
if match.Route == nil {
match.Route = r
@@ -87,7 +68,6 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if match.Vars == nil {
match.Vars = make(map[string]string)
}
-
// Set variables.
if r.regexp != nil {
r.regexp.setMatch(req, match, r)
@@ -612,44 +592,6 @@ func (r *Route) GetPathRegexp() (string, error) {
return r.regexp.path.regexp.String(), nil
}
-// GetQueriesRegexp returns the expanded regular expressions used to match the
-// route queries.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An empty list will be returned if the route does not have queries.
-func (r *Route) GetQueriesRegexp() ([]string, error) {
- if r.err != nil {
- return nil, r.err
- }
- if r.regexp == nil || r.regexp.queries == nil {
- return nil, errors.New("mux: route doesn't have queries")
- }
- var queries []string
- for _, query := range r.regexp.queries {
- queries = append(queries, query.regexp.String())
- }
- return queries, nil
-}
-
-// GetQueriesTemplates returns the templates used to build the
-// query matching.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An empty list will be returned if the route does not define queries.
-func (r *Route) GetQueriesTemplates() ([]string, error) {
- if r.err != nil {
- return nil, r.err
- }
- if r.regexp == nil || r.regexp.queries == nil {
- return nil, errors.New("mux: route doesn't have queries")
- }
- var queries []string
- for _, query := range r.regexp.queries {
- queries = append(queries, query.template)
- }
- return queries, nil
-}
-
// GetMethods returns the methods the route matches against
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE
new file mode 100644
index 00000000..bd899d83
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
new file mode 100644
index 00000000..dd3c9d47
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/README.md
@@ -0,0 +1,31 @@
+[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4)
+[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
+
+# lz4
+LZ4 compression and decompression in pure Go
+
+## Usage
+
+```go
+import "github.com/pierrec/lz4"
+```
+
+## Description
+
+Package lz4 implements reading and writing lz4 compressed data (a frame),
+as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+using an io.Reader (decompression) and io.Writer (compression).
+It is designed to minimize memory usage while maximizing throughput by being able to
+[de]compress data concurrently.
+
+The Reader and the Writer support concurrent processing provided the supplied buffers are
+large enough (in multiples of BlockMaxSize) and there is no block dependency.
+Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently.
+The runtime.GOMAXPROCS() value is used to apply concurrency or not.
+
+Although the block level compression and decompression functions are exposed and are fully compatible
+with the lz4 block format definition, they are low level and should not be used directly.
+For a complete description of an lz4 compressed block, see:
+http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+
+See https://github.com/Cyan4973/lz4 for the reference C implementation.
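+
+A short streaming sketch against the frame-level API this vendored version exposes
+(`NewWriter`/`NewReader`); the payload is illustrative:
+
+```go
+package main
+
+import (
+	"bytes"
+	"io"
+	"os"
+
+	"github.com/pierrec/lz4"
+)
+
+func main() {
+	payload := bytes.Repeat([]byte("hello lz4 "), 1000)
+
+	// Compress into an in-memory frame.
+	var frame bytes.Buffer
+	zw := lz4.NewWriter(&frame)
+	if _, err := zw.Write(payload); err != nil {
+		panic(err)
+	}
+	if err := zw.Close(); err != nil { // Close flushes and writes the end-of-frame marker
+		panic(err)
+	}
+
+	// Decompress the frame back out.
+	zr := lz4.NewReader(&frame)
+	if _, err := io.Copy(os.Stdout, zr); err != nil {
+		panic(err)
+	}
+}
+```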
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
new file mode 100644
index 00000000..145eec27
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -0,0 +1,445 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "errors"
+)
+
+// block represents a frame data block.
+// Used when compressing or decompressing frame blocks concurrently.
+type block struct {
+ compressed bool
+ zdata []byte // compressed data
+ data []byte // decompressed data
+ offset int // offset within the data; with block dependency the 64Kb window is prepended to it
+ checksum uint32 // compressed data checksum
+ err error // error while [de]compressing
+}
+
+var (
+ // ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted.
+ ErrInvalidSource = errors.New("lz4: invalid source")
+ // ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when
+ // the supplied buffer for [de]compression is too small.
+ ErrShortBuffer = errors.New("lz4: short buffer")
+)
+
+// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
+func CompressBlockBound(n int) int {
+ return n + n/255 + 16
+}
+
+// UncompressBlock decompresses the source buffer into the destination one,
+// starting at the di index and returning the decompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte, di int) (int, error) {
+ si, sn, di0 := 0, len(src), di
+ if sn == 0 {
+ return 0, nil
+ }
+
+ for {
+ // literals and match lengths (token)
+ lLen := int(src[si] >> 4)
+ mLen := int(src[si] & 0xF)
+ if si++; si == sn {
+ return di, ErrInvalidSource
+ }
+
+ // literals
+ if lLen > 0 {
+ if lLen == 0xF {
+ for src[si] == 0xFF {
+ lLen += 0xFF
+ if si++; si == sn {
+ return di - di0, ErrInvalidSource
+ }
+ }
+ lLen += int(src[si])
+ if si++; si == sn {
+ return di - di0, ErrInvalidSource
+ }
+ }
+ if len(dst)-di < lLen || si+lLen > sn {
+ return di - di0, ErrShortBuffer
+ }
+ di += copy(dst[di:], src[si:si+lLen])
+
+ if si += lLen; si >= sn {
+ return di - di0, nil
+ }
+ }
+
+ if si += 2; si >= sn {
+ return di, ErrInvalidSource
+ }
+ offset := int(src[si-2]) | int(src[si-1])<<8
+ if di-offset < 0 || offset == 0 {
+ return di - di0, ErrInvalidSource
+ }
+
+ // match
+ if mLen == 0xF {
+ for src[si] == 0xFF {
+ mLen += 0xFF
+ if si++; si == sn {
+ return di - di0, ErrInvalidSource
+ }
+ }
+ mLen += int(src[si])
+ if si++; si == sn {
+ return di - di0, ErrInvalidSource
+ }
+ }
+ // minimum match length is 4
+ mLen += 4
+ if len(dst)-di <= mLen {
+ return di - di0, ErrShortBuffer
+ }
+
+ // copy the match (NB. match is at least 4 bytes long)
+ // NB. past di, copy() would write old bytes instead of
+ // the ones we just copied, so split the work into the largest chunk.
+ for ; mLen >= offset; mLen -= offset {
+ di += copy(dst[di:], dst[di-offset:di])
+ }
+ di += copy(dst[di:], dst[di-offset:di-offset+mLen])
+ }
+}
+
+// CompressBlock compresses the source buffer starting at soffset into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+//
+// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlock(src, dst []byte, soffset int) (int, error) {
+ sn, dn := len(src)-mfLimit, len(dst)
+ if sn <= 0 || dn == 0 || soffset >= sn {
+ return 0, nil
+ }
+ var si, di int
+
+ // fast scan strategy:
+ // we only need a hash table to store the last sequences (4 bytes)
+ var hashTable [1 << hashLog]int
+ var hashShift = uint((minMatch * 8) - hashLog)
+
+ // Initialise the hash table with the first 64Kb of the input buffer
+ // (used when compressing dependent blocks)
+ for si < soffset {
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+ si++
+ hashTable[h] = si
+ }
+
+ anchor := si
+ fma := 1 << skipStrength
+ for si < sn-minMatch {
+ // hash the next 4 bytes (sequence)...
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+ // -1 to separate existing entries from new ones
+ ref := hashTable[h] - 1
+ // ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving)
+ hashTable[h] = si + 1
+ // no need to check the last 3 bytes in the first literal 4 bytes as
+ // this guarantees that the next match, if any, is compressed with
+ // a lower size, since to have some compression we must have:
+ // ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size)
+ // => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap
+ // and by definition we do have:
+ // ll >= 1, ml >= 4
+ // => ll+ml >= 5
+ // => so overlap must be 0
+
+ // the sequence is new, out of bound (64kb) or not valid: try next sequence
+ if ref < 0 || fma&(1<<skipStrength-1) > 0 || (si-ref)>>winSizeLog > 0 ||
+ src[ref] != src[si] ||
+ src[ref+1] != src[si+1] ||
+ src[ref+2] != src[si+2] ||
+ src[ref+3] != src[si+3] {
+ // variable step: improves performance on non-compressible data
+ si += fma >> skipStrength
+ fma++
+ continue
+ }
+ // match found
+ fma = 1 << skipStrength
+ lLen := si - anchor
+ offset := si - ref
+
+ // encode match length part 1
+ si += minMatch
+ mLen := si // match length has minMatch already
+ for si <= sn && src[si] == src[si-offset] {
+ si++
+ }
+ mLen = si - mLen
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // encode literals length
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ l := lLen - 0xF
+ for ; l >= 0xFF; l -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(l)
+ }
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+
+ // literals
+ if di+lLen >= dn {
+ return di, ErrShortBuffer
+ }
+ di += copy(dst[di:], src[anchor:anchor+lLen])
+ anchor = si
+
+ // encode offset
+ if di += 2; di >= dn {
+ return di, ErrShortBuffer
+ }
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // encode match length part 2
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(mLen)
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ }
+
+ if anchor == 0 {
+ // incompressible
+ return 0, nil
+ }
+
+ // last literals
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ lLen -= 0xF
+ for ; lLen >= 0xFF; lLen -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(lLen)
+ }
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+
+ // write literals
+ src = src[anchor:]
+ switch n := di + len(src); {
+ case n > dn:
+ return di, ErrShortBuffer
+ case n >= sn:
+ // incompressible
+ return 0, nil
+ }
+ di += copy(dst[di:], src)
+ return di, nil
+}
+
+// CompressBlockHC compresses the source buffer starting at soffset into the destination one.
+// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
+//
+// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlockHC(src, dst []byte, soffset int) (int, error) {
+ sn, dn := len(src)-mfLimit, len(dst)
+ if sn <= 0 || dn == 0 || soffset >= sn {
+ return 0, nil
+ }
+ var si, di int
+
+ // Hash Chain strategy:
+ // we need a hash table and a chain table
+ // the chain table cannot contain more entries than the window size (64Kb entries)
+ var hashTable [1 << hashLog]int
+ var chainTable [winSize]int
+ var hashShift = uint((minMatch * 8) - hashLog)
+
+ // Initialise the hash table with the first 64Kb of the input buffer
+ // (used when compressing dependent blocks)
+ for si < soffset {
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+ chainTable[si&winMask] = hashTable[h]
+ si++
+ hashTable[h] = si
+ }
+
+ anchor := si
+ for si < sn-minMatch {
+ // hash the next 4 bytes (sequence)...
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+
+ // follow the chain until out of window and give the longest match
+ mLen := 0
+ offset := 0
+ for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 {
+ // the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length
+ if src[next+mLen] == src[si+mLen] {
+ for ml := 0; ; ml++ {
+ if src[next+ml] != src[si+ml] || si+ml > sn {
+ // found a longer match, keep its position and length
+ if mLen < ml && ml >= minMatch {
+ mLen = ml
+ offset = si - next
+ }
+ break
+ }
+ }
+ }
+ }
+ chainTable[si&winMask] = hashTable[h]
+ hashTable[h] = si + 1
+
+ // no match found
+ if mLen == 0 {
+ si++
+ continue
+ }
+
+ // match found
+ // update hash/chain tables with overlapping bytes:
+ // si already hashed, add everything from si+1 up to the match length
+ for si, ml := si+1, si+mLen; si < ml; {
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+ chainTable[si&winMask] = hashTable[h]
+ si++
+ hashTable[h] = si
+ }
+
+ lLen := si - anchor
+ si += mLen
+ mLen -= minMatch // match length does not include minMatch
+
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // encode literals length
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ l := lLen - 0xF
+ for ; l >= 0xFF; l -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(l)
+ }
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+
+ // literals
+ if di+lLen >= dn {
+ return di, ErrShortBuffer
+ }
+ di += copy(dst[di:], src[anchor:anchor+lLen])
+ anchor = si
+
+ // encode offset
+ if di += 2; di >= dn {
+ return di, ErrShortBuffer
+ }
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // encode match length part 2
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(mLen)
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ }
+
+ if anchor == 0 {
+ // incompressible
+ return 0, nil
+ }
+
+ // last literals
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ lLen -= 0xF
+ for ; lLen >= 0xFF; lLen -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(lLen)
+ }
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+
+ // write literals
+ src = src[anchor:]
+ switch n := di + len(src); {
+ case n > dn:
+ return di, ErrShortBuffer
+ case n >= sn:
+ // incompressible
+ return 0, nil
+ }
+ di += copy(dst[di:], src)
+ return di, nil
+}
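
An illustrative round trip with the block-level functions above (the package docs advise against using them directly); `soffset`/`di` are zero here because no dependent-block window is involved:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	src := bytes.Repeat([]byte("0123456789"), 100)

	// CompressBlockBound gives a worst-case destination size for incompressible input.
	zbuf := make([]byte, lz4.CompressBlockBound(len(src)))
	zn, err := lz4.CompressBlock(src, zbuf, 0)
	if err != nil {
		panic(err)
	}
	if zn == 0 {
		fmt.Println("input was not compressible")
		return
	}

	// UncompressBlock needs a destination at least as large as the original data.
	dst := make([]byte, len(src))
	n, err := lz4.UncompressBlock(zbuf[:zn], dst, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println("roundtrip ok:", bytes.Equal(src, dst[:n]))
}
```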
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
new file mode 100644
index 00000000..ddb82f66
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -0,0 +1,105 @@
+// Package lz4 implements reading and writing lz4 compressed data (a frame),
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+// using an io.Reader (decompression) and io.Writer (compression).
+// It is designed to minimize memory usage while maximizing throughput by being able to
+// [de]compress data concurrently.
+//
+// The Reader and the Writer support concurrent processing provided the supplied buffers are
+// large enough (in multiples of BlockMaxSize) and there is no block dependency.
+// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently.
+// The runtime.GOMAXPROCS() value is used to apply concurrency or not.
+//
+// Although the block level compression and decompression functions are exposed and are fully compatible
+// with the lz4 block format definition, they are low level and should not be used directly.
+// For a complete description of an lz4 compressed block, see:
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+//
+// See https://github.com/Cyan4973/lz4 for the reference C implementation.
+package lz4
+
+import (
+ "hash"
+ "sync"
+
+ "github.com/pierrec/xxHash/xxHash32"
+)
+
+const (
+ // Extension is the LZ4 frame file name extension
+ Extension = ".lz4"
+ // Version is the LZ4 frame format version
+ Version = 1
+
+ frameMagic = uint32(0x184D2204)
+ frameSkipMagic = uint32(0x184D2A50)
+
+ // The following constants are used to setup the compression algorithm.
+ minMatch = 4 // the minimum size of the match sequence size (4 bytes)
+ winSizeLog = 16 // LZ4 64Kb window size limit
+ winSize = 1 << winSizeLog
+ winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+ // hashLog determines the size of the hash table used to quickly find a previous match position.
+ // Its value influences the compression speed and memory usage, the lower the faster,
+ // but at the expense of the compression ratio.
+ // 16 seems to be the best compromise.
+ hashLog = 16
+ hashTableSize = 1 << hashLog
+ hashShift = uint((minMatch * 8) - hashLog)
+
+ mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
+ skipStrength = 6 // variable step for fast scan
+
+ hasher = uint32(2654435761) // prime number used to hash minMatch
+)
+
+// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
+var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20}
+var bsMapValue = map[int]byte{}
+
+// Reversed.
+func init() {
+ for i, v := range bsMapID {
+ bsMapValue[v] = i
+ }
+}
+
+// Header describes the various flags that can be set on a Writer or obtained from a Reader.
+// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
+//
+// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
+// It is the caller's responsibility to check them if necessary (typically when using the Reader concurrency).
+type Header struct {
+ BlockDependency bool // compressed blocks are dependent (one block depends on the last 64Kb of the previous one)
+ BlockChecksum bool // compressed blocks are checksummed
+ NoChecksum bool // frame checksum
+ BlockMaxSize int // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
+ Size uint64 // the frame total size. It is _not_ computed by the Writer.
+ HighCompression bool // use high compression (only for the Writer)
+ done bool // whether the descriptor was processed (Read or Write and checked)
+ // Removed as not supported
+ // Dict bool // a dictionary id is to be used
+ // DictID uint32 // the dictionary id read from the frame, if any.
+}
+
+// xxhPool wraps the standard pool for xxHash items.
+// Putting items back in the pool automatically resets them.
+type xxhPool struct {
+ sync.Pool
+}
+
+func (p *xxhPool) Get() hash.Hash32 {
+ return p.Pool.Get().(hash.Hash32)
+}
+
+func (p *xxhPool) Put(h hash.Hash32) {
+ h.Reset()
+ p.Pool.Put(h)
+}
+
+// hashPool is used by readers and writers and contains xxHash items.
+var hashPool = xxhPool{
+ Pool: sync.Pool{
+ New: func() interface{} { return xxHash32.New(0) },
+ },
+}
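
A sketch of tuning the `Header` fields documented above on a `Writer`; the values are examples only and must be set before the first `Write`, since the frame header is emitted lazily:

```go
package main

import (
	"bytes"

	"github.com/pierrec/lz4"
)

func main() {
	var out bytes.Buffer
	zw := lz4.NewWriter(&out)

	// 256Kb blocks, per-block checksums, and the slower high-compression mode.
	zw.Header.BlockMaxSize = 256 << 10
	zw.Header.BlockChecksum = true
	zw.Header.HighCompression = true

	if _, err := zw.Write([]byte("some data worth framing")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
}
```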
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
new file mode 100644
index 00000000..9f7fd604
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader.go
@@ -0,0 +1,364 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "runtime"
+ "sync"
+ "sync/atomic"
+)
+
+// ErrInvalid is returned when the data being read is not an LZ4 archive
+// (LZ4 magic number detection failed).
+var ErrInvalid = errors.New("invalid lz4 data")
+
+// errEndOfBlock is returned by readBlock when it has reached the last block of the frame.
+// It is not an error.
+var errEndOfBlock = errors.New("end of block")
+
+// Reader implements the LZ4 frame decoder.
+// The Header is set after the first call to Read().
+// The Header may change between Read() calls in case of concatenated frames.
+type Reader struct {
+ Pos int64 // position within the source
+ Header
+ src io.Reader
+ checksum hash.Hash32 // frame hash
+ wg sync.WaitGroup // decompressing go routine wait group
+ data []byte // buffered decompressed data
+ window []byte // 64Kb decompressed data window
+}
+
+// NewReader returns a new LZ4 frame decoder.
+// No access to the underlying io.Reader is performed.
+func NewReader(src io.Reader) *Reader {
+ return &Reader{
+ src: src,
+ checksum: hashPool.Get(),
+ }
+}
+
+// readHeader checks the frame magic number and parses the frame descriptor.
+// Skippable frames are supported even as a first frame although the LZ4
+// specification recommends that skippable frames not be used as first frames.
+func (z *Reader) readHeader(first bool) error {
+ defer z.checksum.Reset()
+
+ for {
+ var magic uint32
+ if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil {
+ if !first && err == io.ErrUnexpectedEOF {
+ return io.EOF
+ }
+ return err
+ }
+ z.Pos += 4
+ if magic>>8 == frameSkipMagic>>8 {
+ var skipSize uint32
+ if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil {
+ return err
+ }
+ z.Pos += 4
+ m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
+ z.Pos += m
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ if magic != frameMagic {
+ return ErrInvalid
+ }
+ break
+ }
+
+ // header
+ var buf [8]byte
+ if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
+ return err
+ }
+ z.Pos += 2
+
+ b := buf[0]
+ if b>>6 != Version {
+ return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version)
+ }
+ z.BlockDependency = b>>5&1 == 0
+ z.BlockChecksum = b>>4&1 > 0
+ frameSize := b>>3&1 > 0
+ z.NoChecksum = b>>2&1 == 0
+ // z.Dict = b&1 > 0
+
+ bmsID := buf[1] >> 4 & 0x7
+ bSize, ok := bsMapID[bmsID]
+ if !ok {
+ return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID)
+ }
+ z.BlockMaxSize = bSize
+
+ z.checksum.Write(buf[0:2])
+
+ if frameSize {
+ if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil {
+ return err
+ }
+ z.Pos += 8
+ binary.LittleEndian.PutUint64(buf[:], z.Size)
+ z.checksum.Write(buf[0:8])
+ }
+
+ // if z.Dict {
+ // if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil {
+ // return err
+ // }
+ // z.Pos += 4
+ // binary.LittleEndian.PutUint32(buf[:], z.DictID)
+ // z.checksum.Write(buf[0:4])
+ // }
+
+ // header checksum
+ if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
+ return err
+ }
+ z.Pos++
+ if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
+ return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h)
+ }
+
+ z.Header.done = true
+
+ return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+//
+// Data is buffered if the input buffer is too small, and exhausted upon successive calls.
+//
+// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is
+// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value.
+func (z *Reader) Read(buf []byte) (n int, err error) {
+ if !z.Header.done {
+ if err = z.readHeader(true); err != nil {
+ return
+ }
+ }
+
+ if len(buf) == 0 {
+ return
+ }
+
+ // exhaust remaining data from previous Read()
+ if len(z.data) > 0 {
+ n = copy(buf, z.data)
+ z.data = z.data[n:]
+ if len(z.data) == 0 {
+ z.data = nil
+ }
+ return
+ }
+
+ // Break up the input buffer into BlockMaxSize blocks with at least one block.
+ // Then decompress into each of them concurrently if possible (no dependency).
+ // In case of dependency, the first block will be missing the window (except on the
+ // very first call), the rest will have it already since it comes from the previous block.
+ wbuf := buf
+ zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize
+ zblocks := make([]block, zn)
+ for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ {
+ zb := &zblocks[zi]
+ // last block may be too small
+ if len(wbuf) < z.BlockMaxSize+len(z.window) {
+ wbuf = make([]byte, z.BlockMaxSize+len(z.window))
+ }
+ copy(wbuf, z.window)
+ if zb.err = z.readBlock(wbuf, zb); zb.err != nil {
+ break
+ }
+ wbuf = wbuf[z.BlockMaxSize:]
+ if !z.BlockDependency {
+ z.wg.Add(1)
+ go z.decompressBlock(zb, &abort)
+ continue
+ }
+ // cannot decompress concurrently when dealing with block dependency
+ z.decompressBlock(zb, nil)
+ // the last block may not contain enough data
+ if len(z.window) == 0 {
+ z.window = make([]byte, winSize)
+ }
+ if len(zb.data) >= winSize {
+ copy(z.window, zb.data[len(zb.data)-winSize:])
+ } else {
+ copy(z.window, z.window[len(zb.data):])
+ copy(z.window[len(zb.data)+1:], zb.data)
+ }
+ }
+ z.wg.Wait()
+
+ // since a block size may be less than BlockMaxSize, trim the decompressed buffers
+ for _, zb := range zblocks {
+ if zb.err != nil {
+ if zb.err == errEndOfBlock {
+ return n, z.close()
+ }
+ return n, zb.err
+ }
+ bLen := len(zb.data)
+ if !z.NoChecksum {
+ z.checksum.Write(zb.data)
+ }
+ m := copy(buf[n:], zb.data)
+ // buffer the remaining data (this is necessarily the last block)
+ if m < bLen {
+ z.data = zb.data[m:]
+ }
+ n += m
+ }
+
+ return
+}
+
+// readBlock reads an entire frame block from the frame.
+// The input buffer is the one that will receive the decompressed data.
+// If the end of the frame is detected, it returns the errEndOfBlock error.
+func (z *Reader) readBlock(buf []byte, b *block) error {
+ var bLen uint32
+ if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil {
+ return err
+ }
+ atomic.AddInt64(&z.Pos, 4)
+
+ switch {
+ case bLen == 0:
+ return errEndOfBlock
+ case bLen&(1<<31) == 0:
+ b.compressed = true
+ b.data = buf
+ b.zdata = make([]byte, bLen)
+ default:
+ bLen = bLen & (1<<31 - 1)
+ if int(bLen) > len(buf) {
+ return fmt.Errorf("lz4.Read: invalid block size: %d", bLen)
+ }
+ b.data = buf[:bLen]
+ b.zdata = buf[:bLen]
+ }
+ if _, err := io.ReadFull(z.src, b.zdata); err != nil {
+ return err
+ }
+
+ if z.BlockChecksum {
+ if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil {
+ return err
+ }
+ xxh := hashPool.Get()
+ defer hashPool.Put(xxh)
+ xxh.Write(b.zdata)
+ if h := xxh.Sum32(); h != b.checksum {
+ return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum)
+ }
+ }
+
+ return nil
+}
+
+// decompressBlock decompresses a frame block.
+// In case of an error, the block err is set with it and abort is set to 1.
+func (z *Reader) decompressBlock(b *block, abort *uint32) {
+ if abort != nil {
+ defer z.wg.Done()
+ }
+ if b.compressed {
+ n := len(z.window)
+ m, err := UncompressBlock(b.zdata, b.data, n)
+ if err != nil {
+ if abort != nil {
+ atomic.StoreUint32(abort, 1)
+ }
+ b.err = err
+ return
+ }
+ b.data = b.data[n : n+m]
+ }
+ atomic.AddInt64(&z.Pos, int64(len(b.data)))
+}
+
+// close validates the frame checksum (if any) and checks the next frame (if any).
+func (z *Reader) close() error {
+ if !z.NoChecksum {
+ var checksum uint32
+ if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil {
+ return err
+ }
+ if checksum != z.checksum.Sum32() {
+ return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum)
+ }
+ }
+
+ // get ready for the next concatenated frame, but do not change the position
+ pos := z.Pos
+ z.Reset(z.src)
+ z.Pos = pos
+
+ // since multiple frames can be concatenated, check for another one
+ return z.readHeader(false)
+}
+
+// Reset discards the Reader's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) {
+ z.Header = Header{}
+ z.Pos = 0
+ z.src = r
+ z.checksum.Reset()
+ z.data = nil
+ z.window = nil
+}
+
+// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer.
+// Returns the number of bytes written.
+func (z *Reader) WriteTo(w io.Writer) (n int64, err error) {
+ cpus := runtime.GOMAXPROCS(0)
+ var buf []byte
+
+ // The initial buffer being nil, the first Read will only read the compressed frame options.
+ // The buffer can then be sized appropriately to support maximum decompression concurrency.
+ // If multiple frames are concatenated, Read() will return with no data decompressed but with
+ // potentially changed options. The buffer will be resized accordingly, always trying to
+ // maximize concurrency.
+ for {
+ nsize := 0
+ // the block max size can change if multiple streams are concatenated.
+ // Check it after every Read().
+ if z.BlockDependency {
+ // in case of dependency, we cannot decompress concurrently,
+ // so allocate the minimum buffer + window size
+ nsize = len(z.window) + z.BlockMaxSize
+ } else {
+ // if no dependency, allocate a buffer large enough for concurrent decompression
+ nsize = cpus * z.BlockMaxSize
+ }
+ if nsize != len(buf) {
+ buf = make([]byte, nsize)
+ }
+
+ m, er := z.Read(buf)
+ if er != nil && er != io.EOF {
+ return n, er
+ }
+ m, err = w.Write(buf[:m])
+ n += int64(m)
+ if err != nil || er == io.EOF {
+ return
+ }
+ }
+}
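
A sketch of `Reader.WriteTo` as described above; the file names are hypothetical placeholders:

```go
package main

import (
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	in, err := os.Open("archive.lz4") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer in.Close()

	out, err := os.Create("archive.out")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// WriteTo sizes its internal buffer from GOMAXPROCS and BlockMaxSize so that
	// independent blocks can be decompressed concurrently.
	if _, err := lz4.NewReader(in).WriteTo(out); err != nil {
		panic(err)
	}
}
```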
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
new file mode 100644
index 00000000..b1b712fe
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -0,0 +1,377 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "io"
+ "runtime"
+)
+
+// Writer implements the LZ4 frame encoder.
+type Writer struct {
+ Header
+ dst io.Writer
+ checksum hash.Hash32 // frame checksum
+ data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with
+ window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer
+
+ zbCompressBuf []byte // buffer for compressing lz4 blocks
+ writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeBlock
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+// No access to the underlying io.Writer is performed.
+// The supplied Header is checked at the first Write.
+// It is ok to change it before the first Write but then not until a Reset() is performed.
+func NewWriter(dst io.Writer) *Writer {
+ return &Writer{
+ dst: dst,
+ checksum: hashPool.Get(),
+ Header: Header{
+ BlockMaxSize: 4 << 20,
+ },
+ writeSizeBuf: make([]byte, 4),
+ }
+}
+
+// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
+func (z *Writer) writeHeader() error {
+ // Default to 4Mb if BlockMaxSize is not set
+ if z.Header.BlockMaxSize == 0 {
+ z.Header.BlockMaxSize = 4 << 20
+ }
+ // the only option that needs to be validated
+ bSize, ok := bsMapValue[z.Header.BlockMaxSize]
+ if !ok {
+ return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize)
+ }
+
+ // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
+ // Size and DictID are optional
+ var buf [19]byte
+
+ // set the fixed size data: magic number, block max size and flags
+ binary.LittleEndian.PutUint32(buf[0:], frameMagic)
+ flg := byte(Version << 6)
+ if !z.Header.BlockDependency {
+ flg |= 1 << 5
+ }
+ if z.Header.BlockChecksum {
+ flg |= 1 << 4
+ }
+ if z.Header.Size > 0 {
+ flg |= 1 << 3
+ }
+ if !z.Header.NoChecksum {
+ flg |= 1 << 2
+ }
+ // if z.Header.Dict {
+ // flg |= 1
+ // }
+ buf[4] = flg
+ buf[5] = bSize << 4
+
+ // current buffer size: magic(4) + flags(1) + block max size (1)
+ n := 6
+ // optional items
+ if z.Header.Size > 0 {
+ binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
+ n += 8
+ }
+ // if z.Header.Dict {
+ // binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID)
+ // n += 4
+ // }
+
+ // header checksum includes the flags, block max size and optional Size and DictID
+ z.checksum.Write(buf[4:n])
+ buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF)
+ z.checksum.Reset()
+
+ // header ready, write it out
+ if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
+ return err
+ }
+ z.Header.done = true
+
+ // initialize buffers dependent on header info
+ z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize)
+
+ return nil
+}
+
+// Write compresses data from the supplied buffer into the underlying io.Writer.
+// Write does not return until the data has been written.
+//
+// If the input buffer is large enough (typically in multiples of BlockMaxSize)
+// the data will be compressed concurrently.
+//
+// Write never buffers any data unless in BlockDependency mode where it may
+// do so until it has 64Kb of data, after which it never buffers any.
+func (z *Writer) Write(buf []byte) (n int, err error) {
+ if !z.Header.done {
+ if err = z.writeHeader(); err != nil {
+ return
+ }
+ }
+
+ if len(buf) == 0 {
+ return
+ }
+
+ if !z.NoChecksum {
+ z.checksum.Write(buf)
+ }
+
+ // with block dependency, require at least 64Kb of data to work with
+ // not having 64Kb only matters initially to setup the first window
+ bl := 0
+ if z.BlockDependency && len(z.window) == 0 {
+ bl = len(z.data)
+ z.data = append(z.data, buf...)
+ if len(z.data) < winSize {
+ return len(buf), nil
+ }
+ buf = z.data
+ z.data = nil
+ }
+
+ // Break up the input buffer into BlockMaxSize blocks, provisioning the left over block.
+ // Then compress into each of them concurrently if possible (no dependency).
+ var (
+ zb block
+ wbuf = buf
+ zn = len(wbuf) / z.BlockMaxSize
+ zi = 0
+ leftover = len(buf) % z.BlockMaxSize
+ )
+
+loop:
+ for zi < zn {
+ if z.BlockDependency {
+ if zi == 0 {
+ // first block does not have the window
+ zb.data = append(z.window, wbuf[:z.BlockMaxSize]...)
+ zb.offset = len(z.window)
+ wbuf = wbuf[z.BlockMaxSize-winSize:]
+ } else {
+ // set the uncompressed data including the window from previous block
+ zb.data = wbuf[:z.BlockMaxSize+winSize]
+ zb.offset = winSize
+ wbuf = wbuf[z.BlockMaxSize:]
+ }
+ } else {
+ zb.data = wbuf[:z.BlockMaxSize]
+ wbuf = wbuf[z.BlockMaxSize:]
+ }
+
+ goto write
+ }
+
+ // left over
+ if leftover > 0 {
+ zb = block{data: wbuf}
+ if z.BlockDependency {
+ if zn == 0 {
+ zb.data = append(z.window, zb.data...)
+ zb.offset = len(z.window)
+ } else {
+ zb.offset = winSize
+ }
+ }
+
+ leftover = 0
+ goto write
+ }
+
+ if z.BlockDependency {
+ if len(z.window) == 0 {
+ z.window = make([]byte, winSize)
+ }
+ // last buffer may be shorter than the window
+ if len(buf) >= winSize {
+ copy(z.window, buf[len(buf)-winSize:])
+ } else {
+ copy(z.window, z.window[len(buf):])
+ copy(z.window[len(buf)+1:], buf)
+ }
+ }
+
+ return
+
+write:
+ zb = z.compressBlock(zb)
+ _, err = z.writeBlock(zb)
+
+ written := len(zb.data)
+ if bl > 0 {
+ if written >= bl {
+ written -= bl
+ bl = 0
+ } else {
+ bl -= written
+ written = 0
+ }
+ }
+
+ n += written
+ // remove the window in zb.data
+ if z.BlockDependency {
+ if zi == 0 {
+ n -= len(z.window)
+ } else {
+ n -= winSize
+ }
+ }
+ if err != nil {
+ return
+ }
+ zi++
+ goto loop
+}
+
+// compressBlock compresses a block.
+func (z *Writer) compressBlock(zb block) block {
+ // compressed block size cannot exceed the input's
+ var (
+ n int
+ err error
+ zbuf = z.zbCompressBuf
+ )
+ if z.HighCompression {
+ n, err = CompressBlockHC(zb.data, zbuf, zb.offset)
+ } else {
+ n, err = CompressBlock(zb.data, zbuf, zb.offset)
+ }
+
+ // compressible and compressed size smaller than decompressed: ok!
+ if err == nil && n > 0 && len(zb.zdata) < len(zb.data) {
+ zb.compressed = true
+ zb.zdata = zbuf[:n]
+ } else {
+ zb.compressed = false
+ zb.zdata = zb.data[zb.offset:]
+ }
+
+ if z.BlockChecksum {
+ xxh := hashPool.Get()
+ xxh.Write(zb.zdata)
+ zb.checksum = xxh.Sum32()
+ hashPool.Put(xxh)
+ }
+
+ return zb
+}
+
+// writeBlock writes a frame block to the underlying io.Writer (size, data).
+func (z *Writer) writeBlock(zb block) (int, error) {
+ bLen := uint32(len(zb.zdata))
+ if !zb.compressed {
+ bLen |= 1 << 31
+ }
+
+ n := 0
+
+ binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen)
+ n, err := z.dst.Write(z.writeSizeBuf)
+ if err != nil {
+ return n, err
+ }
+
+ m, err := z.dst.Write(zb.zdata)
+ n += m
+ if err != nil {
+ return n, err
+ }
+
+ if z.BlockChecksum {
+ binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum)
+ m, err := z.dst.Write(z.writeSizeBuf)
+ n += m
+
+ if err != nil {
+ return n, err
+ }
+ }
+
+ return n, nil
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+// Flush does not return until the data has been written.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// Flush is only required when in BlockDependency mode and the total of
+// data written is less than 64Kb.
+func (z *Writer) Flush() error {
+ if len(z.data) == 0 {
+ return nil
+ }
+
+ zb := z.compressBlock(block{data: z.data})
+ if _, err := z.writeBlock(zb); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+ if !z.Header.done {
+ if err := z.writeHeader(); err != nil {
+ return err
+ }
+ }
+
+ // buffered data for the block dependency window
+ if z.BlockDependency && len(z.data) > 0 {
+ zb := block{data: z.data}
+ if _, err := z.writeBlock(z.compressBlock(zb)); err != nil {
+ return err
+ }
+ }
+
+ if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil {
+ return err
+ }
+ if !z.NoChecksum {
+ if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Reset clears the state of the Writer z such that it is equivalent to its
+// initial state from NewWriter, but instead writing to w.
+// No access to the underlying io.Writer is performed.
+func (z *Writer) Reset(w io.Writer) {
+ z.Header = Header{}
+ z.dst = w
+ z.checksum.Reset()
+ z.data = nil
+ z.window = nil
+}
+
+// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer.
+// Returns the number of bytes read.
+// It does not close the Writer.
+func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+ cpus := runtime.GOMAXPROCS(0)
+ buf := make([]byte, cpus*z.BlockMaxSize)
+ for {
+ m, er := io.ReadFull(r, buf)
+ n += int64(m)
+ if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF {
+ if _, err = z.Write(buf[:m]); err != nil {
+ return
+ }
+ if er == nil {
+ continue
+ }
+ return
+ }
+ return n, er
+ }
+}
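
The `Writer` above follows the familiar streaming-compressor shape: `NewWriter` wraps an `io.Writer`, `Write`/`ReadFrom` feed it, and `Close` emits the end-of-frame marker and the stream checksum. A minimal usage sketch, assuming the vendored package is imported under its canonical path `github.com/pierrec/lz4`; the payload and error handling are illustrative only:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4" // assumed canonical import path for the vendored package
)

func main() {
	var compressed bytes.Buffer

	// NewWriter wraps any io.Writer; BlockMaxSize defaults to 4Mb on the first write.
	zw := lz4.NewWriter(&compressed)

	if _, err := zw.Write([]byte("some data to compress")); err != nil {
		log.Fatal(err)
	}
	// Close flushes pending data and writes the end-of-frame marker and checksum.
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	fmt.Println("frame size:", compressed.Len())
}
```
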
diff --git a/vendor/github.com/pierrec/xxHash/LICENSE b/vendor/github.com/pierrec/xxHash/LICENSE
new file mode 100644
index 00000000..c1418f3f
--- /dev/null
+++ b/vendor/github.com/pierrec/xxHash/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2014, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go
new file mode 100644
index 00000000..411504e4
--- /dev/null
+++ b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go
@@ -0,0 +1,205 @@
+// Package xxHash32 implements the very fast xxHash hashing algorithm (32-bit version).
+// (https://github.com/Cyan4973/xxHash/)
+package xxHash32
+
+import "hash"
+
+const (
+ prime32_1 = 2654435761
+ prime32_2 = 2246822519
+ prime32_3 = 3266489917
+ prime32_4 = 668265263
+ prime32_5 = 374761393
+)
+
+type xxHash struct {
+ seed uint32
+ v1 uint32
+ v2 uint32
+ v3 uint32
+ v4 uint32
+ totalLen uint64
+ buf [16]byte
+ bufused int
+}
+
+// New returns a new Hash32 instance.
+func New(seed uint32) hash.Hash32 {
+ xxh := &xxHash{seed: seed}
+ xxh.Reset()
+ return xxh
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh xxHash) Sum(b []byte) []byte {
+ h32 := xxh.Sum32()
+ return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *xxHash) Reset() {
+ xxh.v1 = xxh.seed + prime32_1 + prime32_2
+ xxh.v2 = xxh.seed + prime32_2
+ xxh.v3 = xxh.seed
+ xxh.v4 = xxh.seed - prime32_1
+ xxh.totalLen = 0
+ xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *xxHash) Size() int {
+ return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *xxHash) BlockSize() int {
+ return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *xxHash) Write(input []byte) (int, error) {
+ n := len(input)
+ m := xxh.bufused
+
+ xxh.totalLen += uint64(n)
+
+ r := len(xxh.buf) - m
+ if n < r {
+ copy(xxh.buf[m:], input)
+ xxh.bufused += len(input)
+ return n, nil
+ }
+
+ p := 0
+ if m > 0 {
+ // some data left from previous update
+ copy(xxh.buf[xxh.bufused:], input[:r])
+ xxh.bufused += len(input) - r
+
+ // fast rotl(13)
+ p32 := xxh.v1 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
+ xxh.v1 = (p32<<13 | p32>>19) * prime32_1
+ p += 4
+ p32 = xxh.v2 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
+ xxh.v2 = (p32<<13 | p32>>19) * prime32_1
+ p += 4
+ p32 = xxh.v3 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
+ xxh.v3 = (p32<<13 | p32>>19) * prime32_1
+ p += 4
+ p32 = xxh.v4 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
+ xxh.v4 = (p32<<13 | p32>>19) * prime32_1
+
+ p = r
+ xxh.bufused = 0
+ }
+
+ for n := n - 16; p <= n; {
+ p32 := xxh.v1 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
+ xxh.v1 = (p32<<13 | p32>>19) * prime32_1
+ p += 4
+ p32 = xxh.v2 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
+ xxh.v2 = (p32<<13 | p32>>19) * prime32_1
+ p += 4
+ p32 = xxh.v3 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
+ xxh.v3 = (p32<<13 | p32>>19) * prime32_1
+ p += 4
+ p32 = xxh.v4 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
+ xxh.v4 = (p32<<13 | p32>>19) * prime32_1
+ p += 4
+ }
+
+ copy(xxh.buf[xxh.bufused:], input[p:])
+ xxh.bufused += len(input) - p
+
+ return n, nil
+}
+
+// Sum32 returns the 32-bit Hash value.
+func (xxh *xxHash) Sum32() uint32 {
+ h32 := uint32(xxh.totalLen)
+ if xxh.totalLen >= 16 {
+ h32 += ((xxh.v1 << 1) | (xxh.v1 >> 31)) +
+ ((xxh.v2 << 7) | (xxh.v2 >> 25)) +
+ ((xxh.v3 << 12) | (xxh.v3 >> 20)) +
+ ((xxh.v4 << 18) | (xxh.v4 >> 14))
+ } else {
+ h32 += xxh.seed + prime32_5
+ }
+
+ p := 0
+ n := xxh.bufused
+ for n := n - 4; p <= n; p += 4 {
+ h32 += (uint32(xxh.buf[p+3])<<24 | uint32(xxh.buf[p+2])<<16 | uint32(xxh.buf[p+1])<<8 | uint32(xxh.buf[p])) * prime32_3
+ h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4
+ }
+ for ; p < n; p++ {
+ h32 += uint32(xxh.buf[p]) * prime32_5
+ h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1
+ }
+
+ h32 ^= h32 >> 15
+ h32 *= prime32_2
+ h32 ^= h32 >> 13
+ h32 *= prime32_3
+ h32 ^= h32 >> 16
+
+ return h32
+}
+
+// Checksum returns the 32-bit Hash value.
+func Checksum(input []byte, seed uint32) uint32 {
+ n := len(input)
+ h32 := uint32(n)
+
+ if n < 16 {
+ h32 += seed + prime32_5
+ } else {
+ v1 := seed + prime32_1 + prime32_2
+ v2 := seed + prime32_2
+ v3 := seed
+ v4 := seed - prime32_1
+ p := 0
+ for p <= n-16 {
+ v1 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
+ v1 = (v1<<13 | v1>>19) * prime32_1
+ p += 4
+ v2 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
+ v2 = (v2<<13 | v2>>19) * prime32_1
+ p += 4
+ v3 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
+ v3 = (v3<<13 | v3>>19) * prime32_1
+ p += 4
+ v4 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
+ v4 = (v4<<13 | v4>>19) * prime32_1
+ p += 4
+ }
+ input = input[p:]
+ n -= p
+ h32 += ((v1 << 1) | (v1 >> 31)) +
+ ((v2 << 7) | (v2 >> 25)) +
+ ((v3 << 12) | (v3 >> 20)) +
+ ((v4 << 18) | (v4 >> 14))
+ }
+
+ p := 0
+ for p <= n-4 {
+ h32 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_3
+ h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4
+ p += 4
+ }
+ for p < n {
+ h32 += uint32(input[p]) * prime32_5
+ h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1
+ p++
+ }
+
+ h32 ^= h32 >> 15
+ h32 *= prime32_2
+ h32 ^= h32 >> 13
+ h32 *= prime32_3
+ h32 ^= h32 >> 16
+
+ return h32
+}
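
For reference, this package exposes two entry points: the streaming hash returned by `New(seed)` (a `hash.Hash32`) and the one-shot `Checksum(input, seed)` helper; both should agree for the same input. A small sketch, assuming the canonical import path `github.com/pierrec/xxHash/xxHash32`:

```go
package main

import (
	"fmt"

	"github.com/pierrec/xxHash/xxHash32" // assumed canonical import path
)

func main() {
	data := []byte("hello, xxHash32")

	// One-shot helper: hashes the whole slice with the given seed.
	fmt.Printf("one-shot:  %08x\n", xxHash32.Checksum(data, 0))

	// Streaming form: the hash.Hash32 returned by New can be fed incrementally.
	h := xxHash32.New(0)
	h.Write(data[:5])
	h.Write(data[5:])
	fmt.Printf("streaming: %08x\n", h.Sum32())
}
```
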
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 00000000..835ba3e7
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 00000000..273db3c9
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,52 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Contributing
+
+We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+
+Before proposing a change, please discuss your change by raising an issue.
+
+## Licence
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 00000000..a932eade
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 00000000..842ee804
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,269 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// and the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required the errors.WithStack and errors.WithMessage
+// functions destructure errors.Wrap into its component operations of annotating
+// an error with a stack trace and a message, respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error which does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// causer interface is not exported by this package, but is considered a part
+// of stable public API.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface.
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// Where errors.StackTrace is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d", f)
+// }
+// }
+//
+// stackTracer interface is not exported by this package, but is considered a part
+// of stable public API.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
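
The package documentation above already sketches the idioms; the short program below simply ties `Wrap`, `Cause` and the `%+v` verb together, assuming the canonical import path `github.com/pkg/errors` (the `readConfig` helper is a stand-in, not part of the package):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// readConfig is a hypothetical operation that fails.
func readConfig() error {
	return errors.New("file does not exist")
}

func main() {
	err := errors.Wrap(readConfig(), "load config")

	fmt.Println(err)               // "load config: file does not exist"
	fmt.Println(errors.Cause(err)) // "file does not exist"
	fmt.Printf("%+v\n", err)       // message plus the recorded stack trace
}
```
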
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 00000000..cbe3f3e3
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,186 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s path of source file relative to the compile time GOPATH
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ pc := f.pc()
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ io.WriteString(s, "unknown")
+ } else {
+ file, _ := fn.FileLine(pc)
+ fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
+ }
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ fmt.Fprintf(s, "%d", f.line())
+ case 'n':
+ name := runtime.FuncForPC(f.pc()).Name()
+ io.WriteString(s, funcname(name))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ fmt.Fprintf(s, "\n%+v", f)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ fmt.Fprintf(s, "%v", []Frame(st))
+ }
+ case 's':
+ fmt.Fprintf(s, "%s", []Frame(st))
+ }
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
+
+func trimGOPATH(name, file string) string {
+ // Here we want to get the source file path relative to the compile time
+ // GOPATH. As of Go 1.6.x there is no direct way to know the compiled
+ // GOPATH at runtime, but we can infer the number of path segments in the
+ // GOPATH. We note that fn.Name() returns the function name qualified by
+ // the import path, which does not include the GOPATH. Thus we can trim
+ // segments from the beginning of the file path until the number of path
+ // separators remaining is one more than the number of path separators in
+ // the function name. For example, given:
+ //
+ // GOPATH /home/user
+ // file /home/user/src/pkg/sub/file.go
+ // fn.Name() pkg/sub.Type.Method
+ //
+ // We want to produce:
+ //
+ // pkg/sub/file.go
+ //
+ // From this we can easily see that fn.Name() has one less path separator
+ // than our desired output. We count separators from the end of the file
+ // path until it finds two more than in the function name and then move
+ // one character forward to preserve the initial path segment without a
+ // leading separator.
+ const sep = "/"
+ goal := strings.Count(name, sep) + 2
+ i := len(file)
+ for n := 0; n < goal; n++ {
+ i = strings.LastIndex(file[:i], sep)
+ if i == -1 {
+ // not enough separators found, set i so that the slice expression
+ // below leaves file unmodified
+ i = -len(sep)
+ break
+ }
+ }
+ // get back to 0 or trim the leading separator
+ file = file[i+len(sep):]
+ return file
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE
new file mode 100644
index 00000000..363fa9ee
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/LICENSE
@@ -0,0 +1,29 @@
+Copyright 2012 Richard Crowley. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation
+are those of the authors and should not be interpreted as representing
+official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
new file mode 100644
index 00000000..2d1a6dcf
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -0,0 +1,153 @@
+go-metrics
+==========
+
+![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
+
+Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
+
+Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+r := NewRegistry()
+g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
+
+Register() is not thread-safe. For thread-safe metric registration use
+GetOrRegister:
+
+```go
+t := metrics.GetOrRegisterTimer("account.create.latency", nil)
+t.Time(func() {})
+t.Update(47)
+```
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
+
+```go
+
+import "github.com/cyberdelia/go-metrics-graphite"
+
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+**NOTE:** this has been pulled out of the library due to constant fluctuations
+in the InfluxDB API. In fact, all client libraries are on their way out. See
+issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
+[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
+
+```go
+import "github.com/vrischmann/go-metrics-influxdb"
+
+go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
+ Host: "127.0.0.1:8086",
+ Database: "metrics",
+ Username: "test",
+ Password: "test",
+})
+```
+
+Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
+
+**Note**: the client included with this repository under the `librato` package
+has been deprecated and moved to the repository linked above.
+
+```go
+import "github.com/mihasya/go-metrics-librato"
+
+go librato.Librato(metrics.DefaultRegistry,
+ 10e9, // interval
+ "example@example.com", // account owner email address
+ "token", // Librato API token
+ "hostname", // source
+ []float64{0.95}, // percentiles to send
+ time.Millisecond, // time unit
+)
+```
+
+Periodically emit every metric to StatHat:
+
+```go
+import "github.com/rcrowley/go-metrics/stathat"
+
+go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
+```
+
+Maintain all metrics along with expvars at `/debug/metrics`:
+
+This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
+but exposed under `/debug/metrics`, which shows a JSON representation of all your usual expvars
+as well as all your go-metrics.
+
+
+```go
+import "github.com/rcrowley/go-metrics/exp"
+
+exp.Exp(metrics.DefaultRegistry)
+```
+
+Installation
+------------
+
+```sh
+go get github.com/rcrowley/go-metrics
+```
+
+StatHat support additionally requires their Go client:
+
+```sh
+go get github.com/stathat/go
+```
+
+Publishing Metrics
+------------------
+
+Clients are available for the following destinations:
+
+* Librato - [https://github.com/mihasya/go-metrics-librato](https://github.com/mihasya/go-metrics-librato)
+* Graphite - [https://github.com/cyberdelia/go-metrics-graphite](https://github.com/cyberdelia/go-metrics-graphite)
+* InfluxDB - [https://github.com/vrischmann/go-metrics-influxdb](https://github.com/vrischmann/go-metrics-influxdb)
+* Ganglia - [https://github.com/appscode/metlia](https://github.com/appscode/metlia)
+* Prometheus - [https://github.com/deathowl/go-metrics-prometheus](https://github.com/deathowl/go-metrics-prometheus)
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
new file mode 100644
index 00000000..bb7b039c
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/counter.go
@@ -0,0 +1,112 @@
+package metrics
+
+import "sync/atomic"
+
+// Counters hold an int64 value that can be incremented and decremented.
+type Counter interface {
+ Clear()
+ Count() int64
+ Dec(int64)
+ Inc(int64)
+ Snapshot() Counter
+}
+
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
+func GetOrRegisterCounter(name string, r Registry) Counter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewCounter).(Counter)
+}
+
+// NewCounter constructs a new StandardCounter.
+func NewCounter() Counter {
+ if UseNilMetrics {
+ return NilCounter{}
+ }
+ return &StandardCounter{0}
+}
+
+// NewRegisteredCounter constructs and registers a new StandardCounter.
+func NewRegisteredCounter(name string, r Registry) Counter {
+ c := NewCounter()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+ panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+ panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+ panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
+type NilCounter struct{}
+
+// Clear is a no-op.
+func (NilCounter) Clear() {}
+
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
+
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
+
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardCounter struct {
+ count int64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounter) Clear() {
+ atomic.StoreInt64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *StandardCounter) Count() int64 {
+ return atomic.LoadInt64(&c.count)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounter) Dec(i int64) {
+ atomic.AddInt64(&c.count, -i)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounter) Inc(i int64) {
+ atomic.AddInt64(&c.count, i)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounter) Snapshot() Counter {
+ return CounterSnapshot(c.Count())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
new file mode 100644
index 00000000..043ccefa
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/debug.go
@@ -0,0 +1,76 @@
+package metrics
+
+import (
+ "runtime/debug"
+ "time"
+)
+
+var (
+ debugMetrics struct {
+ GCStats struct {
+ LastGC Gauge
+ NumGC Gauge
+ Pause Histogram
+ //PauseQuantiles Histogram
+ PauseTotal Gauge
+ }
+ ReadGCStats Timer
+ }
+ gcStats debug.GCStats
+)
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats. This is designed to be called as a goroutine.
+func CaptureDebugGCStats(r Registry, d time.Duration) {
+ for _ = range time.Tick(d) {
+ CaptureDebugGCStatsOnce(r)
+ }
+}
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats. This is designed to be called in a background goroutine.
+// Giving a registry which has not been given to RegisterDebugGCStats will
+// panic.
+//
+// Be careful (but much less so) with this because debug.ReadGCStats calls
+// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
+// operation, isn't something you want to be doing all the time.
+func CaptureDebugGCStatsOnce(r Registry) {
+ lastGC := gcStats.LastGC
+ t := time.Now()
+ debug.ReadGCStats(&gcStats)
+ debugMetrics.ReadGCStats.UpdateSince(t)
+
+ debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
+ debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
+ if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
+ debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
+ }
+ //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
+ debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
+}
+
+// Register metrics for the Go garbage collector statistics exported in
+// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
+// i.e. debug.GCStats.PauseTotal.
+func RegisterDebugGCStats(r Registry) {
+ debugMetrics.GCStats.LastGC = NewGauge()
+ debugMetrics.GCStats.NumGC = NewGauge()
+ debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
+ //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
+ debugMetrics.GCStats.PauseTotal = NewGauge()
+ debugMetrics.ReadGCStats = NewTimer()
+
+ r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
+ r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
+ r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
+ //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
+ r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
+ r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+}
+
+// Allocate an initial slice for gcStats.Pause to avoid allocations during
+// normal operation.
+func init() {
+ gcStats.Pause = make([]time.Duration, 11)
+}
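
Per the comments above, `RegisterDebugGCStats` must run before any capture call, and `CaptureDebugGCStats` is meant to be driven from its own goroutine. A minimal sketch under those assumptions; the interval and the sleep are illustrative only:

```go
package main

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.DefaultRegistry

	// Register the GC gauges first; capturing against an unregistered
	// registry panics, as noted in the package comment.
	metrics.RegisterDebugGCStats(r)
	go metrics.CaptureDebugGCStats(r, 5*time.Second)

	// ... application work ...
	time.Sleep(time.Second)
}
```
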
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
new file mode 100644
index 00000000..694a1d03
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/ewma.go
@@ -0,0 +1,118 @@
+package metrics
+
+import (
+ "math"
+ "sync"
+ "sync/atomic"
+)
+
+// EWMAs continuously calculate an exponentially-weighted moving average
+// based on an outside source of clock ticks.
+type EWMA interface {
+ Rate() float64
+ Snapshot() EWMA
+ Tick()
+ Update(int64)
+}
+
+// NewEWMA constructs a new EWMA with the given alpha.
+func NewEWMA(alpha float64) EWMA {
+ if UseNilMetrics {
+ return NilEWMA{}
+ }
+ return &StandardEWMA{alpha: alpha}
+}
+
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
+func NewEWMA1() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/1))
+}
+
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
+func NewEWMA5() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/5))
+}
+
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
+func NewEWMA15() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/15))
+}
+
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+ panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+ panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
+type NilEWMA struct{}
+
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
+
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
+
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
+
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick. It uses the
+// sync/atomic package to manage uncounted events.
+type StandardEWMA struct {
+ uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+ alpha float64
+ rate float64
+ init bool
+ mutex sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *StandardEWMA) Rate() float64 {
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ return a.rate * float64(1e9)
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+ return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average. It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
+ count := atomic.LoadInt64(&a.uncounted)
+ atomic.AddInt64(&a.uncounted, -count)
+ instantRate := float64(count) / float64(5e9)
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ if a.init {
+ a.rate += a.alpha * (instantRate - a.rate)
+ } else {
+ a.init = true
+ a.rate = instantRate
+ }
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+ atomic.AddInt64(&a.uncounted, n)
+}
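
In practice a `Meter` drives the five-second `Tick`, but the EWMA can also be exercised directly, as in this rough sketch (import path assumed as in the earlier examples):

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// One-minute moving average; StandardEWMA assumes Tick is called
	// every five seconds (normally a Meter does this for you).
	a := metrics.NewEWMA1()

	a.Update(100) // 100 uncounted events since the last tick
	a.Tick()      // fold them into the rate

	fmt.Printf("%.2f events/sec\n", a.Rate())
}
```
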
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
new file mode 100644
index 00000000..cb57a938
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge.go
@@ -0,0 +1,120 @@
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+ Snapshot() Gauge
+ Update(int64)
+ Value() int64
+}
+
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
+func GetOrRegisterGauge(name string, r Registry) Gauge {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGauge).(Gauge)
+}
+
+// NewGauge constructs a new StandardGauge.
+func NewGauge() Gauge {
+ if UseNilMetrics {
+ return NilGauge{}
+ }
+ return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+ c := NewGauge()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGauge(f func() int64) Gauge {
+ if UseNilMetrics {
+ return NilGauge{}
+ }
+ return &FunctionalGauge{value: f}
+}
+
+// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
+func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
+ c := NewFunctionalGauge(f)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+ panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+ value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+ return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+ atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+ return atomic.LoadInt64(&g.value)
+}
+
+// FunctionalGauge returns value from given function
+type FunctionalGauge struct {
+ value func() int64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGauge) Value() int64 {
+ return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGauge) Update(int64) {
+ panic("Update called on a FunctionalGauge")
+}
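
A short sketch contrasting the two gauge flavours defined above: a `StandardGauge` that is set explicitly and a `FunctionalGauge` whose value comes from a callback (the metric names are illustrative):

```go
package main

import (
	"fmt"
	"runtime"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// A standard gauge is set explicitly.
	g := metrics.GetOrRegisterGauge("queue.depth", nil)
	g.Update(42)

	// A functional gauge computes its value lazily from a callback.
	fg := metrics.NewRegisteredFunctionalGauge("goroutines", nil, func() int64 {
		return int64(runtime.NumGoroutine())
	})

	fmt.Println(g.Value(), fg.Value())
}
```
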
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 00000000..6f93920b
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,127 @@
+package metrics
+
+import "sync"
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+ Snapshot() GaugeFloat64
+ Update(float64)
+ Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &StandardGaugeFloat64{
+ value: 0.0,
+ }
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ c := NewGaugeFloat64()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+ c := NewFunctionalGaugeFloat64(f)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+ panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
+// sync.Mutex to manage a single float64 value.
+type StandardGaugeFloat64 struct {
+ mutex sync.Mutex
+ value float64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+ return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ g.value = v
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ return g.value
+}
+
+// FunctionalGaugeFloat64 returns value from given function
+type FunctionalGaugeFloat64 struct {
+ value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+ return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGaugeFloat64) Update(float64) {
+ panic("Update called on a FunctionalGaugeFloat64")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
new file mode 100644
index 00000000..abd0a7d2
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/graphite.go
@@ -0,0 +1,113 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter
+type GraphiteConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+ Percentiles []float64 // Percentiles to export from timers and histograms
+}
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ GraphiteWithConfig(GraphiteConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+ })
+}
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+ log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+ for _ = range time.Tick(c.FlushInterval) {
+ if err := graphite(&c); nil != err {
+ log.Println(err)
+ }
+ }
+}
+
+// GraphiteOnce performs a single submission to Graphite, returning a
+// non-nil error on failed connections. This can be used in a loop
+// similar to GraphiteWithConfig for custom error handling.
+func GraphiteOnce(c GraphiteConfig) error {
+ log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+ return graphite(&c)
+}
+
+func graphite(c *GraphiteConfig) error {
+ now := time.Now().Unix()
+ du := float64(c.DurationUnit)
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
+ c.Registry.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+ case Gauge:
+ fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+ case GaugeFloat64:
+ fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles(c.Percentiles)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
+ fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
+ fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
+ fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
+ for psIdx, psKey := range c.Percentiles {
+ key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+ fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+ }
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
+ fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
+ fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
+ fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles(c.Percentiles)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
+ fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
+ fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
+ fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
+ for psIdx, psKey := range c.Percentiles {
+ key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+ fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+ }
+ fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
+ fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
+ fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
+ fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
+ }
+ w.Flush()
+ })
+ return nil
+}
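
Editorial note, not part of the vendored file above: this exporter is deprecated in favour of cyberdelia/go-metrics-graphite, but while it remains vendored, a one-shot flush looks roughly like the sketch below. The Graphite address and prefix are placeholders, the config fields follow the usage in `graphite()` above, and this and the later sketches import the package via the vendored path shown in the diff headers.

```go
package main

import (
	"log"
	"net"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Carbon's plaintext listener conventionally sits on 2003 (placeholder).
	addr, err := net.ResolveTCPAddr("tcp", "localhost:2003")
	if err != nil {
		log.Fatal(err)
	}

	r := metrics.NewRegistry()
	metrics.GetOrRegisterMeter("requests", r).Mark(1)

	// One-shot flush; GraphiteWithConfig loops on FlushInterval instead.
	cfg := metrics.GraphiteConfig{
		Addr:         addr,
		Registry:     r,
		DurationUnit: time.Millisecond,
		Percentiles:  []float64{0.5, 0.95, 0.99},
		Prefix:       "dp-dataset-api", // placeholder prefix
	}
	if err := metrics.GraphiteOnce(cfg); err != nil {
		log.Println("graphite flush failed:", err)
	}
}
```
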
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
new file mode 100644
index 00000000..445131ca
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
@@ -0,0 +1,61 @@
+package metrics
+
+// Healthchecks hold an error value describing an arbitrary up/down status.
+type Healthcheck interface {
+ Check()
+ Error() error
+ Healthy()
+ Unhealthy(error)
+}
+
+// NewHealthcheck constructs a new Healthcheck which will use the given
+// function to update its status.
+func NewHealthcheck(f func(Healthcheck)) Healthcheck {
+ if UseNilMetrics {
+ return NilHealthcheck{}
+ }
+ return &StandardHealthcheck{nil, f}
+}
+
+// NilHealthcheck is a no-op.
+type NilHealthcheck struct{}
+
+// Check is a no-op.
+func (NilHealthcheck) Check() {}
+
+// Error is a no-op.
+func (NilHealthcheck) Error() error { return nil }
+
+// Healthy is a no-op.
+func (NilHealthcheck) Healthy() {}
+
+// Unhealthy is a no-op.
+func (NilHealthcheck) Unhealthy(error) {}
+
+// StandardHealthcheck is the standard implementation of a Healthcheck and
+// stores the status and a function to call to update the status.
+type StandardHealthcheck struct {
+ err error
+ f func(Healthcheck)
+}
+
+// Check runs the healthcheck function to update the healthcheck's status.
+func (h *StandardHealthcheck) Check() {
+ h.f(h)
+}
+
+// Error returns the healthcheck's status, which will be nil if it is healthy.
+func (h *StandardHealthcheck) Error() error {
+ return h.err
+}
+
+// Healthy marks the healthcheck as healthy.
+func (h *StandardHealthcheck) Healthy() {
+ h.err = nil
+}
+
+// Unhealthy marks the healthcheck as unhealthy. The error is stored and
+// may be retrieved by the Error method.
+func (h *StandardHealthcheck) Unhealthy(err error) {
+ h.err = err
+}
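
A `Healthcheck` simply wraps a probe function that marks itself `Healthy` or `Unhealthy`. A hedged sketch (illustrative, not vendored code) using a hypothetical MongoDB TCP probe:

```go
package main

import (
	"fmt"
	"net"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// The probe body is purely illustrative; any check that ends in
	// Healthy()/Unhealthy(err) works.
	mongoCheck := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
		conn, err := net.DialTimeout("tcp", "localhost:27017", 2*time.Second)
		if err != nil {
			h.Unhealthy(err)
			return
		}
		conn.Close()
		h.Healthy()
	})

	if err := metrics.Register("mongodb", mongoCheck); err != nil {
		fmt.Println("register failed:", err)
	}

	// RunHealthchecks drives every registered Healthcheck; Error() is nil
	// while the last check passed.
	metrics.RunHealthchecks()
	fmt.Println("mongodb healthy:", mongoCheck.Error() == nil)
}
```
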
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go
new file mode 100644
index 00000000..dbc837fe
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/histogram.go
@@ -0,0 +1,202 @@
+package metrics
+
+// Histograms calculate distribution statistics from a series of int64 values.
+type Histogram interface {
+ Clear()
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Sample() Sample
+ Snapshot() Histogram
+ StdDev() float64
+ Sum() int64
+ Update(int64)
+ Variance() float64
+}
+
+// GetOrRegisterHistogram returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
+func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
+}
+
+// NewHistogram constructs a new StandardHistogram from a Sample.
+func NewHistogram(s Sample) Histogram {
+ if UseNilMetrics {
+ return NilHistogram{}
+ }
+ return &StandardHistogram{sample: s}
+}
+
+// NewRegisteredHistogram constructs and registers a new StandardHistogram from
+// a Sample.
+func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
+ c := NewHistogram(s)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+ sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+ panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+ return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+ return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Histogram { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample at the time the snapshot was taken.
+func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+ panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
+type NilHistogram struct{}
+
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
+
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilHistogram) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
+
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
+
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
+type StandardHistogram struct {
+ sample Sample
+}
+
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
+
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (h *StandardHistogram) Percentile(p float64) float64 {
+ return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
+ return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
+
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Histogram {
+ return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample.
+func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
+
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
+
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
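
A minimal histogram usage sketch (editorial, not vendored code), pairing it with the exponentially-decaying reservoir defined in `sample.go` later in this diff; the metric name and values are made up:

```go
package main

import (
	"fmt"
	"math/rand"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// 1028/0.015 are the reservoir parameters this library itself uses for
	// runtime.MemStats.PauseNs.
	s := metrics.NewExpDecaySample(1028, 0.015)
	h := metrics.GetOrRegisterHistogram("response.sizes", metrics.DefaultRegistry, s)

	for i := 0; i < 10000; i++ {
		h.Update(rand.Int63n(5000)) // record a value, e.g. payload bytes
	}

	// Snapshot gives a read-only, internally consistent view for reporting.
	snap := h.Snapshot()
	fmt.Printf("count=%d mean=%.1f p95=%.1f max=%d\n",
		snap.Count(), snap.Mean(), snap.Percentile(0.95), snap.Max())
}
```
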
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
new file mode 100644
index 00000000..2fdcbcfb
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/json.go
@@ -0,0 +1,87 @@
+package metrics
+
+import (
+ "encoding/json"
+ "io"
+ "time"
+)
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the Registry.
+func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
+ data := make(map[string]map[string]interface{})
+ r.Each(func(name string, i interface{}) {
+ values := make(map[string]interface{})
+ switch metric := i.(type) {
+ case Counter:
+ values["count"] = metric.Count()
+ case Gauge:
+ values["value"] = metric.Value()
+ case GaugeFloat64:
+ values["value"] = metric.Value()
+ case Healthcheck:
+ values["error"] = nil
+ metric.Check()
+ if err := metric.Error(); nil != err {
+ values["error"] = metric.Error().Error()
+ }
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ values["count"] = h.Count()
+ values["min"] = h.Min()
+ values["max"] = h.Max()
+ values["mean"] = h.Mean()
+ values["stddev"] = h.StdDev()
+ values["median"] = ps[0]
+ values["75%"] = ps[1]
+ values["95%"] = ps[2]
+ values["99%"] = ps[3]
+ values["99.9%"] = ps[4]
+ case Meter:
+ m := metric.Snapshot()
+ values["count"] = m.Count()
+ values["1m.rate"] = m.Rate1()
+ values["5m.rate"] = m.Rate5()
+ values["15m.rate"] = m.Rate15()
+ values["mean.rate"] = m.RateMean()
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ values["count"] = t.Count()
+ values["min"] = t.Min()
+ values["max"] = t.Max()
+ values["mean"] = t.Mean()
+ values["stddev"] = t.StdDev()
+ values["median"] = ps[0]
+ values["75%"] = ps[1]
+ values["95%"] = ps[2]
+ values["99%"] = ps[3]
+ values["99.9%"] = ps[4]
+ values["1m.rate"] = t.Rate1()
+ values["5m.rate"] = t.Rate5()
+ values["15m.rate"] = t.Rate15()
+ values["mean.rate"] = t.RateMean()
+ }
+ data[name] = values
+ })
+ return json.Marshal(data)
+}
+
+// WriteJSON writes metrics from the given registry periodically to the
+// specified io.Writer as JSON.
+func WriteJSON(r Registry, d time.Duration, w io.Writer) {
+ for _ = range time.Tick(d) {
+ WriteJSONOnce(r, w)
+ }
+}
+
+// WriteJSONOnce writes metrics from the given registry to the specified
+// io.Writer as JSON.
+func WriteJSONOnce(r Registry, w io.Writer) {
+ json.NewEncoder(w).Encode(r)
+}
+
+func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
+ return json.Marshal(p.underlying)
+}
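
For ad-hoc inspection the whole registry can be dumped as a JSON object keyed by metric name; a short sketch (not part of the vendored file):

```go
package main

import (
	"os"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.GetOrRegisterMeter("requests", r).Mark(3)

	// One-off dump to stdout. WriteJSON(r, d, w) does the same on a fixed
	// interval and blocks, so it would normally run in its own goroutine.
	metrics.WriteJSONOnce(r, os.Stdout)
}
```
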
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
new file mode 100644
index 00000000..f8074c04
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/log.go
@@ -0,0 +1,80 @@
+package metrics
+
+import (
+ "time"
+)
+
+type Logger interface {
+ Printf(format string, v ...interface{})
+}
+
+func Log(r Registry, freq time.Duration, l Logger) {
+ LogScaled(r, freq, time.Nanosecond, l)
+}
+
+// Output each metric in the given registry periodically using the given
+// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
+ du := float64(scale)
+ duSuffix := scale.String()[1:]
+
+ for _ = range time.Tick(freq) {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ l.Printf("counter %s\n", name)
+ l.Printf(" count: %9d\n", metric.Count())
+ case Gauge:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %9d\n", metric.Value())
+ case GaugeFloat64:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %f\n", metric.Value())
+ case Healthcheck:
+ metric.Check()
+ l.Printf("healthcheck %s\n", name)
+ l.Printf(" error: %v\n", metric.Error())
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ l.Printf("histogram %s\n", name)
+ l.Printf(" count: %9d\n", h.Count())
+ l.Printf(" min: %9d\n", h.Min())
+ l.Printf(" max: %9d\n", h.Max())
+ l.Printf(" mean: %12.2f\n", h.Mean())
+ l.Printf(" stddev: %12.2f\n", h.StdDev())
+ l.Printf(" median: %12.2f\n", ps[0])
+ l.Printf(" 75%%: %12.2f\n", ps[1])
+ l.Printf(" 95%%: %12.2f\n", ps[2])
+ l.Printf(" 99%%: %12.2f\n", ps[3])
+ l.Printf(" 99.9%%: %12.2f\n", ps[4])
+ case Meter:
+ m := metric.Snapshot()
+ l.Printf("meter %s\n", name)
+ l.Printf(" count: %9d\n", m.Count())
+ l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
+ l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
+ l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
+ l.Printf(" mean rate: %12.2f\n", m.RateMean())
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ l.Printf("timer %s\n", name)
+ l.Printf(" count: %9d\n", t.Count())
+ l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix)
+ l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix)
+ l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix)
+ l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix)
+ l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix)
+ l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix)
+ l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix)
+ l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix)
+ l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix)
+ l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
+ l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
+ l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
+ l.Printf(" mean rate: %12.2f\n", t.RateMean())
+ }
+ })
+ }
+}
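
`Log` and `LogScaled` accept anything with a `Printf` method, so the standard library `*log.Logger` slots straight in. A sketch of periodic reporting (interval and prefix are arbitrary):

```go
package main

import (
	"log"
	"os"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.GetOrRegisterMeter("requests", r).Mark(1)

	// Log blocks, printing every freq, so it runs in a goroutine. LogScaled
	// would print timer values in a coarser unit than nanoseconds.
	go metrics.Log(r, 5*time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))

	// Keep this demo process alive long enough for one report.
	time.Sleep(6 * time.Second)
}
```
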
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md
new file mode 100644
index 00000000..47454f54
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/memory.md
@@ -0,0 +1,285 @@
+Memory usage
+============
+
+(Highly unscientific.)
+
+Command used to gather static memory usage:
+
+```sh
+grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
+```
+
+Program used to gather baseline memory usage:
+
+```go
+package main
+
+import "time"
+
+func main() {
+ time.Sleep(600e9)
+}
+```
+
+Baseline
+--------
+
+```
+VmPeak: 42604 kB
+VmSize: 42604 kB
+VmLck: 0 kB
+VmHWM: 1120 kB
+VmRSS: 1120 kB
+VmData: 35460 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 36 kB
+VmSwap: 0 kB
+```
+
+Program used to gather metric memory usage (with other metrics being similar):
+
+```go
+package main
+
+import (
+ "fmt"
+ "metrics"
+ "time"
+)
+
+func main() {
+ fmt.Sprintf("foo")
+ metrics.NewRegistry()
+ time.Sleep(600e9)
+}
+```
+
+1000 counters registered
+------------------------
+
+```
+VmPeak: 44016 kB
+VmSize: 44016 kB
+VmLck: 0 kB
+VmHWM: 1928 kB
+VmRSS: 1928 kB
+VmData: 36868 kB
+VmStk: 136 kB
+VmExe: 1024 kB
+VmLib: 1848 kB
+VmPTE: 40 kB
+VmSwap: 0 kB
+```
+
+**1.412 kB virtual, TODO 0.808 kB resident per counter.**
+
+100000 counters registered
+--------------------------
+
+```
+VmPeak: 55024 kB
+VmSize: 55024 kB
+VmLck: 0 kB
+VmHWM: 12440 kB
+VmRSS: 12440 kB
+VmData: 47876 kB
+VmStk: 136 kB
+VmExe: 1024 kB
+VmLib: 1848 kB
+VmPTE: 64 kB
+VmSwap: 0 kB
+```
+
+**0.1242 kB virtual, 0.1132 kB resident per counter.**
+
+1000 gauges registered
+----------------------
+
+```
+VmPeak: 44012 kB
+VmSize: 44012 kB
+VmLck: 0 kB
+VmHWM: 1928 kB
+VmRSS: 1928 kB
+VmData: 36868 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 40 kB
+VmSwap: 0 kB
+```
+
+**1.408 kB virtual, 0.808 kB resident per gauge.**
+
+100000 gauges registered
+------------------------
+
+```
+VmPeak: 55020 kB
+VmSize: 55020 kB
+VmLck: 0 kB
+VmHWM: 12432 kB
+VmRSS: 12432 kB
+VmData: 47876 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 60 kB
+VmSwap: 0 kB
+```
+
+**0.12416 kB virtual, 0.11312 kB resident per gauge.**
+
+1000 histograms with a uniform sample size of 1028
+--------------------------------------------------
+
+```
+VmPeak: 72272 kB
+VmSize: 72272 kB
+VmLck: 0 kB
+VmHWM: 16204 kB
+VmRSS: 16204 kB
+VmData: 65100 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 80 kB
+VmSwap: 0 kB
+```
+
+**29.668 kB virtual, TODO 15.084 kB resident per histogram.**
+
+10000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak: 256912 kB
+VmSize: 256912 kB
+VmLck: 0 kB
+VmHWM: 146204 kB
+VmRSS: 146204 kB
+VmData: 249740 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 448 kB
+VmSwap: 0 kB
+```
+
+**21.4308 kB virtual, 14.5084 kB resident per histogram.**
+
+50000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak: 908112 kB
+VmSize: 908112 kB
+VmLck: 0 kB
+VmHWM: 645832 kB
+VmRSS: 645588 kB
+VmData: 900940 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1716 kB
+VmSwap: 1544 kB
+```
+
+**17.31016 kB virtual, 12.88936 kB resident per histogram.**
+
+1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+-------------------------------------------------------------------------------------
+
+```
+VmPeak: 62480 kB
+VmSize: 62480 kB
+VmLck: 0 kB
+VmHWM: 11572 kB
+VmRSS: 11572 kB
+VmData: 55308 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 64 kB
+VmSwap: 0 kB
+```
+
+**19.876 kB virtual, 10.452 kB resident per histogram.**
+
+10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 153296 kB
+VmSize: 153296 kB
+VmLck: 0 kB
+VmHWM: 101176 kB
+VmRSS: 101176 kB
+VmData: 146124 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 240 kB
+VmSwap: 0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 557264 kB
+VmSize: 557264 kB
+VmLck: 0 kB
+VmHWM: 501056 kB
+VmRSS: 501056 kB
+VmData: 550092 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1032 kB
+VmSwap: 0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak: 74504 kB
+VmSize: 74504 kB
+VmLck: 0 kB
+VmHWM: 24124 kB
+VmRSS: 24124 kB
+VmData: 67340 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 92 kB
+VmSwap: 0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak: 278920 kB
+VmSize: 278920 kB
+VmLck: 0 kB
+VmHWM: 227300 kB
+VmRSS: 227300 kB
+VmData: 271756 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 488 kB
+VmSwap: 0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
new file mode 100644
index 00000000..0389ab0b
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -0,0 +1,233 @@
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+ Count() int64
+ Mark(int64)
+ Rate1() float64
+ Rate5() float64
+ Rate15() float64
+ RateMean() float64
+ Snapshot() Meter
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+func NewMeter() Meter {
+ if UseNilMetrics {
+ return NilMeter{}
+ }
+ m := newStandardMeter()
+ arbiter.Lock()
+ defer arbiter.Unlock()
+ arbiter.meters = append(arbiter.meters, m)
+ if !arbiter.started {
+ arbiter.started = true
+ go arbiter.tick()
+ }
+ return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and
+// launches a goroutine.
+func NewRegisteredMeter(name string, r Registry) Meter {
+ c := NewMeter()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+ count int64
+ rate1, rate5, rate15, rateMean float64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+ panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+ lock sync.RWMutex
+ snapshot *MeterSnapshot
+ a1, a5, a15 EWMA
+ startTime time.Time
+}
+
+func newStandardMeter() *StandardMeter {
+ return &StandardMeter{
+ snapshot: &MeterSnapshot{},
+ a1: NewEWMA1(),
+ a5: NewEWMA5(),
+ a15: NewEWMA15(),
+ startTime: time.Now(),
+ }
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+ m.lock.RLock()
+ count := m.snapshot.count
+ m.lock.RUnlock()
+ return count
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.snapshot.count += n
+ m.a1.Update(n)
+ m.a5.Update(n)
+ m.a15.Update(n)
+ m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+ m.lock.RLock()
+ rate1 := m.snapshot.rate1
+ m.lock.RUnlock()
+ return rate1
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+ m.lock.RLock()
+ rate5 := m.snapshot.rate5
+ m.lock.RUnlock()
+ return rate5
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+ m.lock.RLock()
+ rate15 := m.snapshot.rate15
+ m.lock.RUnlock()
+ return rate15
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+ m.lock.RLock()
+ rateMean := m.snapshot.rateMean
+ m.lock.RUnlock()
+ return rateMean
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+ m.lock.RLock()
+ snapshot := *m.snapshot
+ m.lock.RUnlock()
+ return &snapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+ // should run with write lock held on m.lock
+ snapshot := m.snapshot
+ snapshot.rate1 = m.a1.Rate()
+ snapshot.rate5 = m.a5.Rate()
+ snapshot.rate15 = m.a15.Rate()
+ snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
+}
+
+func (m *StandardMeter) tick() {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.a1.Tick()
+ m.a5.Tick()
+ m.a15.Tick()
+ m.updateSnapshot()
+}
+
+type meterArbiter struct {
+ sync.RWMutex
+ started bool
+ meters []*StandardMeter
+ ticker *time.Ticker
+}
+
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+ for {
+ select {
+ case <-ma.ticker.C:
+ ma.tickMeters()
+ }
+ }
+}
+
+func (ma *meterArbiter) tickMeters() {
+ ma.RLock()
+ defer ma.RUnlock()
+ for _, meter := range ma.meters {
+ meter.tick()
+ }
+}
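
Typical meter usage, illustrative only: construct it (which starts the shared 5-second ticker goroutine), `Mark` events as they happen, then read the rates from a snapshot.

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	m := metrics.NewRegisteredMeter("http.requests", metrics.DefaultRegistry)

	for i := 0; i < 100; i++ {
		m.Mark(1) // one event per request handled
		time.Sleep(10 * time.Millisecond)
	}

	snap := m.Snapshot()
	fmt.Printf("count=%d mean-rate=%.2f/s one-minute-rate=%.2f/s\n",
		snap.Count(), snap.RateMean(), snap.Rate1())
}
```
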
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 00000000..b97a49ed
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+//
+//
+// Coda Hale's original work:
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics. If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 00000000..266b6c93
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "os"
+ "strings"
+ "time"
+)
+
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ OpenTSDBWithConfig(OpenTSDBConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ })
+}
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes an OpenTSDBConfig instead.
+func OpenTSDBWithConfig(c OpenTSDBConfig) {
+ for _ = range time.Tick(c.FlushInterval) {
+ if err := openTSDB(&c); nil != err {
+ log.Println(err)
+ }
+ }
+}
+
+func getShortHostname() string {
+ if shortHostName == "" {
+ host, _ := os.Hostname()
+ if index := strings.Index(host, "."); index > 0 {
+ shortHostName = host[:index]
+ } else {
+ shortHostName = host
+ }
+ }
+ return shortHostName
+}
+
+func openTSDB(c *OpenTSDBConfig) error {
+ shortHostname := getShortHostname()
+ now := time.Now().Unix()
+ du := float64(c.DurationUnit)
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
+ c.Registry.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+ case Gauge:
+ fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ case GaugeFloat64:
+ fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
+ }
+ w.Flush()
+ })
+ return nil
+}
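
Wiring the OpenTSDB exporter mirrors the Graphite one; the address below assumes a local TSD on its usual telnet-style port, and the prefix is a placeholder:

```go
package main

import (
	"log"
	"net"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	addr, err := net.ResolveTCPAddr("tcp", "localhost:4242")
	if err != nil {
		log.Fatal(err)
	}

	r := metrics.NewRegistry()
	metrics.GetOrRegisterMeter("requests", r).Mark(1)

	// OpenTSDB blocks, flushing every interval, so it runs in a goroutine.
	go metrics.OpenTSDB(r, 10*time.Second, "dp-dataset-api", addr)

	// Placeholder: a real service would block on its HTTP server instead.
	time.Sleep(30 * time.Second)
}
```
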
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
new file mode 100644
index 00000000..2bb7a1e7
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/registry.go
@@ -0,0 +1,270 @@
+package metrics
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// DuplicateMetric is the error returned by Registry.Register when a metric
+// already exists. If you mean to Register that metric you must first
+// Unregister the existing metric.
+type DuplicateMetric string
+
+func (err DuplicateMetric) Error() string {
+ return fmt.Sprintf("duplicate metric: %s", string(err))
+}
+
+// A Registry holds references to a set of metrics by name and can iterate
+// over them, calling callback functions provided by the user.
+//
+// This is an interface so as to encourage other structs to implement
+// the Registry API as appropriate.
+type Registry interface {
+
+ // Call the given function for each registered metric.
+ Each(func(string, interface{}))
+
+ // Get the metric by the given name or nil if none is registered.
+ Get(string) interface{}
+
+ // Gets an existing metric or registers the given one.
+ // The interface can be the metric to register if not found in registry,
+ // or a function returning the metric for lazy instantiation.
+ GetOrRegister(string, interface{}) interface{}
+
+ // Register the given metric under the given name.
+ Register(string, interface{}) error
+
+ // Run all registered healthchecks.
+ RunHealthchecks()
+
+ // Unregister the metric with the given name.
+ Unregister(string)
+
+ // Unregister all metrics. (Mostly for testing.)
+ UnregisterAll()
+}
+
+// The standard implementation of a Registry is a mutex-protected map
+// of names to metrics.
+type StandardRegistry struct {
+ metrics map[string]interface{}
+ mutex sync.Mutex
+}
+
+// Create a new registry.
+func NewRegistry() Registry {
+ return &StandardRegistry{metrics: make(map[string]interface{})}
+}
+
+// Call the given function for each registered metric.
+func (r *StandardRegistry) Each(f func(string, interface{})) {
+ for name, i := range r.registered() {
+ f(name, i)
+ }
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *StandardRegistry) Get(name string) interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ return r.metrics[name]
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if metric, ok := r.metrics[name]; ok {
+ return metric
+ }
+ if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
+ i = v.Call(nil)[0].Interface()
+ }
+ r.register(name, i)
+ return i
+}
+
+// Register the given metric under the given name. Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func (r *StandardRegistry) Register(name string, i interface{}) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ return r.register(name, i)
+}
+
+// Run all registered healthchecks.
+func (r *StandardRegistry) RunHealthchecks() {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ for _, i := range r.metrics {
+ if h, ok := i.(Healthcheck); ok {
+ h.Check()
+ }
+ }
+}
+
+// Unregister the metric with the given name.
+func (r *StandardRegistry) Unregister(name string) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ delete(r.metrics, name)
+}
+
+// Unregister all metrics. (Mostly for testing.)
+func (r *StandardRegistry) UnregisterAll() {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ for name, _ := range r.metrics {
+ delete(r.metrics, name)
+ }
+}
+
+func (r *StandardRegistry) register(name string, i interface{}) error {
+ if _, ok := r.metrics[name]; ok {
+ return DuplicateMetric(name)
+ }
+ switch i.(type) {
+ case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
+ r.metrics[name] = i
+ }
+ return nil
+}
+
+func (r *StandardRegistry) registered() map[string]interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ metrics := make(map[string]interface{}, len(r.metrics))
+ for name, i := range r.metrics {
+ metrics[name] = i
+ }
+ return metrics
+}
+
+type PrefixedRegistry struct {
+ underlying Registry
+ prefix string
+}
+
+func NewPrefixedRegistry(prefix string) Registry {
+ return &PrefixedRegistry{
+ underlying: NewRegistry(),
+ prefix: prefix,
+ }
+}
+
+func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
+ return &PrefixedRegistry{
+ underlying: parent,
+ prefix: prefix,
+ }
+}
+
+// Call the given function for each registered metric.
+func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
+ wrappedFn := func(prefix string) func(string, interface{}) {
+ return func(name string, iface interface{}) {
+ if strings.HasPrefix(name, prefix) {
+ fn(name, iface)
+ } else {
+ return
+ }
+ }
+ }
+
+ baseRegistry, prefix := findPrefix(r, "")
+ baseRegistry.Each(wrappedFn(prefix))
+}
+
+func findPrefix(registry Registry, prefix string) (Registry, string) {
+ switch r := registry.(type) {
+ case *PrefixedRegistry:
+ return findPrefix(r.underlying, r.prefix+prefix)
+ case *StandardRegistry:
+ return r, prefix
+ }
+ return nil, ""
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *PrefixedRegistry) Get(name string) interface{} {
+ realName := r.prefix + name
+ return r.underlying.Get(realName)
+}
+
+// Gets an existing metric or registers the given one.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+ realName := r.prefix + name
+ return r.underlying.GetOrRegister(realName, metric)
+}
+
+// Register the given metric under the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
+ realName := r.prefix + name
+ return r.underlying.Register(realName, metric)
+}
+
+// Run all registered healthchecks.
+func (r *PrefixedRegistry) RunHealthchecks() {
+ r.underlying.RunHealthchecks()
+}
+
+// Unregister the metric with the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Unregister(name string) {
+ realName := r.prefix + name
+ r.underlying.Unregister(realName)
+}
+
+// Unregister all metrics. (Mostly for testing.)
+func (r *PrefixedRegistry) UnregisterAll() {
+ r.underlying.UnregisterAll()
+}
+
+var DefaultRegistry Registry = NewRegistry()
+
+// Call the given function for each registered metric.
+func Each(f func(string, interface{})) {
+ DefaultRegistry.Each(f)
+}
+
+// Get the metric by the given name or nil if none is registered.
+func Get(name string) interface{} {
+ return DefaultRegistry.Get(name)
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+func GetOrRegister(name string, i interface{}) interface{} {
+ return DefaultRegistry.GetOrRegister(name, i)
+}
+
+// Register the given metric under the given name. Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func Register(name string, i interface{}) error {
+ return DefaultRegistry.Register(name, i)
+}
+
+// Register the given metric under the given name. Panics if a metric by the
+// given name is already registered.
+func MustRegister(name string, i interface{}) {
+ if err := Register(name, i); err != nil {
+ panic(err)
+ }
+}
+
+// Run all registered healthchecks.
+func RunHealthchecks() {
+ DefaultRegistry.RunHealthchecks()
+}
+
+// Unregister the metric with the given name.
+func Unregister(name string) {
+ DefaultRegistry.Unregister(name)
+}
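
Prefixed registries namespace their metrics inside a parent registry, which is useful for per-subsystem grouping; a short sketch of the parent/child interaction:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	parent := metrics.NewRegistry()

	// The child prepends "api." to every name it registers into the parent.
	api := metrics.NewPrefixedChildRegistry(parent, "api.")
	api.GetOrRegister("requests", metrics.NewMeter).(metrics.Meter).Mark(1)

	// Visible in the parent under the full, prefixed name.
	fmt.Println(parent.Get("api.requests") != nil) // true

	// Each on the child only visits metrics carrying its prefix.
	api.Each(func(name string, i interface{}) {
		fmt.Println("child sees:", name)
	})
}
```
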
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
new file mode 100644
index 00000000..11c6b785
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime.go
@@ -0,0 +1,212 @@
+package metrics
+
+import (
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+var (
+ memStats runtime.MemStats
+ runtimeMetrics struct {
+ MemStats struct {
+ Alloc Gauge
+ BuckHashSys Gauge
+ DebugGC Gauge
+ EnableGC Gauge
+ Frees Gauge
+ HeapAlloc Gauge
+ HeapIdle Gauge
+ HeapInuse Gauge
+ HeapObjects Gauge
+ HeapReleased Gauge
+ HeapSys Gauge
+ LastGC Gauge
+ Lookups Gauge
+ Mallocs Gauge
+ MCacheInuse Gauge
+ MCacheSys Gauge
+ MSpanInuse Gauge
+ MSpanSys Gauge
+ NextGC Gauge
+ NumGC Gauge
+ GCCPUFraction GaugeFloat64
+ PauseNs Histogram
+ PauseTotalNs Gauge
+ StackInuse Gauge
+ StackSys Gauge
+ Sys Gauge
+ TotalAlloc Gauge
+ }
+ NumCgoCall Gauge
+ NumGoroutine Gauge
+ NumThread Gauge
+ ReadMemStats Timer
+ }
+ frees uint64
+ lookups uint64
+ mallocs uint64
+ numGC uint32
+ numCgoCalls int64
+
+ threadCreateProfile = pprof.Lookup("threadcreate")
+)
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats. This is designed to be called as a goroutine.
+func CaptureRuntimeMemStats(r Registry, d time.Duration) {
+ for _ = range time.Tick(d) {
+ CaptureRuntimeMemStatsOnce(r)
+ }
+}
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats. This is designed to be called in a background
+// goroutine. Giving a registry which has not been given to
+// RegisterRuntimeMemStats will panic.
+//
+// Be very careful with this because runtime.ReadMemStats calls the C
+// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
+// and that last one does what it says on the tin.
+func CaptureRuntimeMemStatsOnce(r Registry) {
+ t := time.Now()
+ runtime.ReadMemStats(&memStats) // This takes 50-200us.
+ runtimeMetrics.ReadMemStats.UpdateSince(t)
+
+ runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
+ runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
+ if memStats.DebugGC {
+ runtimeMetrics.MemStats.DebugGC.Update(1)
+ } else {
+ runtimeMetrics.MemStats.DebugGC.Update(0)
+ }
+ if memStats.EnableGC {
+ runtimeMetrics.MemStats.EnableGC.Update(1)
+ } else {
+ runtimeMetrics.MemStats.EnableGC.Update(0)
+ }
+
+ runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
+ runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
+ runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
+ runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
+ runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
+ runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
+ runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
+ runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
+ runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
+ runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
+ runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
+ runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
+ runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
+ runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
+ runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
+ runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
+ runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
+
+ //
+ i := numGC % uint32(len(memStats.PauseNs))
+ ii := memStats.NumGC % uint32(len(memStats.PauseNs))
+ if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
+ for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ } else {
+ if i > ii {
+ for ; i < uint32(len(memStats.PauseNs)); i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ i = 0
+ }
+ for ; i < ii; i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ }
+ frees = memStats.Frees
+ lookups = memStats.Lookups
+ mallocs = memStats.Mallocs
+ numGC = memStats.NumGC
+
+ runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
+ runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
+ runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
+ runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
+ runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
+
+ currentNumCgoCalls := numCgoCall()
+ runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
+ numCgoCalls = currentNumCgoCalls
+
+ runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
+
+ runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
+}
+
+// Register runtimeMetrics for the Go runtime statistics exported in runtime and
+// specifically runtime.MemStats. The runtimeMetrics are named by their
+// fully-qualified Go symbols, e.g. runtime.MemStats.Alloc.
+func RegisterRuntimeMemStats(r Registry) {
+ runtimeMetrics.MemStats.Alloc = NewGauge()
+ runtimeMetrics.MemStats.BuckHashSys = NewGauge()
+ runtimeMetrics.MemStats.DebugGC = NewGauge()
+ runtimeMetrics.MemStats.EnableGC = NewGauge()
+ runtimeMetrics.MemStats.Frees = NewGauge()
+ runtimeMetrics.MemStats.HeapAlloc = NewGauge()
+ runtimeMetrics.MemStats.HeapIdle = NewGauge()
+ runtimeMetrics.MemStats.HeapInuse = NewGauge()
+ runtimeMetrics.MemStats.HeapObjects = NewGauge()
+ runtimeMetrics.MemStats.HeapReleased = NewGauge()
+ runtimeMetrics.MemStats.HeapSys = NewGauge()
+ runtimeMetrics.MemStats.LastGC = NewGauge()
+ runtimeMetrics.MemStats.Lookups = NewGauge()
+ runtimeMetrics.MemStats.Mallocs = NewGauge()
+ runtimeMetrics.MemStats.MCacheInuse = NewGauge()
+ runtimeMetrics.MemStats.MCacheSys = NewGauge()
+ runtimeMetrics.MemStats.MSpanInuse = NewGauge()
+ runtimeMetrics.MemStats.MSpanSys = NewGauge()
+ runtimeMetrics.MemStats.NextGC = NewGauge()
+ runtimeMetrics.MemStats.NumGC = NewGauge()
+ runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
+ runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
+ runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
+ runtimeMetrics.MemStats.StackInuse = NewGauge()
+ runtimeMetrics.MemStats.StackSys = NewGauge()
+ runtimeMetrics.MemStats.Sys = NewGauge()
+ runtimeMetrics.MemStats.TotalAlloc = NewGauge()
+ runtimeMetrics.NumCgoCall = NewGauge()
+ runtimeMetrics.NumGoroutine = NewGauge()
+ runtimeMetrics.NumThread = NewGauge()
+ runtimeMetrics.ReadMemStats = NewTimer()
+
+ r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
+ r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
+ r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
+ r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
+ r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
+ r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
+ r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
+ r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
+ r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
+ r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
+ r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
+ r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
+ r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
+ r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
+ r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
+ r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
+ r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
+ r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
+ r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
+ r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
+ r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
+ r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
+ r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
+ r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
+ r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
+ r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
+ r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
+ r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
+ r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
+ r.Register("runtime.NumThread", runtimeMetrics.NumThread)
+ r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+}
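
The runtime collectors are used as a pair: register the gauges once, then capture on an interval; a sketch with an arbitrary 10-second period:

```go
package main

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.DefaultRegistry

	// Register first: capturing into a registry that has not been through
	// RegisterRuntimeMemStats panics (see the doc comment above).
	metrics.RegisterRuntimeMemStats(r)

	// CaptureRuntimeMemStats blocks on its ticker, so run it in a goroutine.
	// Each capture calls runtime.ReadMemStats, which briefly stops the world.
	go metrics.CaptureRuntimeMemStats(r, 10*time.Second)

	time.Sleep(time.Second) // placeholder for the service's real work
}
```
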
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
new file mode 100644
index 00000000..e3391f4e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
@@ -0,0 +1,10 @@
+// +build cgo
+// +build !appengine
+
+package metrics
+
+import "runtime"
+
+func numCgoCall() int64 {
+ return runtime.NumCgoCall()
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
new file mode 100644
index 00000000..ca12c05b
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+ return memStats.GCCPUFraction
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
new file mode 100644
index 00000000..616a3b47
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
@@ -0,0 +1,7 @@
+// +build !cgo appengine
+
+package metrics
+
+func numCgoCall() int64 {
+ return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
new file mode 100644
index 00000000..be96aa6f
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build !go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+ return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
new file mode 100644
index 00000000..fecee5ef
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/sample.go
@@ -0,0 +1,616 @@
+package metrics
+
+import (
+ "math"
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+const rescaleThreshold = time.Hour
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+ Clear()
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Size() int
+ Snapshot() Sample
+ StdDev() float64
+ Sum() int64
+ Update(int64)
+ Values() []int64
+ Variance() float64
+}
+
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
+//
+//
+type ExpDecaySample struct {
+ alpha float64
+ count int64
+ mutex sync.Mutex
+ reservoirSize int
+ t0, t1 time.Time
+ values *expDecaySampleHeap
+}
+
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha.
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
+ if UseNilMetrics {
+ return NilSample{}
+ }
+ s := &ExpDecaySample{
+ alpha: alpha,
+ reservoirSize: reservoirSize,
+ t0: time.Now(),
+ values: newExpDecaySampleHeap(reservoirSize),
+ }
+ s.t1 = s.t0.Add(rescaleThreshold)
+ return s
+}
+
+// Clear clears all samples.
+func (s *ExpDecaySample) Clear() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count = 0
+ s.t0 = time.Now()
+ s.t1 = s.t0.Add(rescaleThreshold)
+ s.values.Clear()
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+ return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+ return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+ return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+ return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+ return SamplePercentiles(s.Values(), ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *ExpDecaySample) Size() int {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.values.Size()
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ vals := s.values.Values()
+ values := make([]int64, len(vals))
+ for i, v := range vals {
+ values[i] = v.v
+ }
+ return &SampleSnapshot{
+ count: s.count,
+ values: values,
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+ return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+ return SampleSum(s.Values())
+}
+
+// Update samples a new value.
+func (s *ExpDecaySample) Update(v int64) {
+ s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ vals := s.values.Values()
+ values := make([]int64, len(vals))
+ for i, v := range vals {
+ values[i] = v.v
+ }
+ return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+ return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp. This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count++
+ if s.values.Size() == s.reservoirSize {
+ s.values.Pop()
+ }
+ s.values.Push(expDecaySample{
+ k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+ v: v,
+ })
+ if t.After(s.t1) {
+ values := s.values.Values()
+ t0 := s.t0
+ s.values.Clear()
+ s.t0 = t
+ s.t1 = s.t0.Add(rescaleThreshold)
+ for _, v := range values {
+ v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
+ s.values.Push(v)
+ }
+ }
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+ if 0 == len(values) {
+ return 0
+ }
+ var max int64 = math.MinInt64
+ for _, v := range values {
+ if max < v {
+ max = v
+ }
+ }
+ return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+ if 0 == len(values) {
+ return 0.0
+ }
+ return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+ if 0 == len(values) {
+ return 0
+ }
+ var min int64 = math.MaxInt64
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ }
+ return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+ return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+ scores := make([]float64, len(ps))
+ size := len(values)
+ if size > 0 {
+ sort.Sort(values)
+ for i, p := range ps {
+ pos := p * float64(size+1)
+ if pos < 1.0 {
+ scores[i] = float64(values[0])
+ } else if pos >= float64(size) {
+ scores[i] = float64(values[size-1])
+ } else {
+ lower := float64(values[int(pos)-1])
+ upper := float64(values[int(pos)])
+ scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+ }
+ }
+ }
+ return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+ count int64
+ values []int64
+}
+
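+// NewSampleSnapshot constructs a read-only snapshot from the given count and values.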
+func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
+ return &SampleSnapshot{
+ count: count,
+ values: values,
+ }
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+ panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
+func (s *SampleSnapshot) Percentile(p float64) float64 {
+ return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values at the time
+// the snapshot was taken.
+func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
+ return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (s *SampleSnapshot) Size() int { return len(s.values) }
+
+// Snapshot returns the snapshot.
+func (s *SampleSnapshot) Snapshot() Sample { return s }
+
+// StdDev returns the standard deviation of values at the time the snapshot was
+// taken.
+func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+
+// Sum returns the sum of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
+
+// Update panics.
+func (*SampleSnapshot) Update(int64) {
+ panic("Update called on a SampleSnapshot")
+}
+
+// Values returns a copy of the values in the sample.
+func (s *SampleSnapshot) Values() []int64 {
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return values
+}
+
+// Variance returns the variance of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
+// SampleStdDev returns the standard deviation of the slice of int64.
+func SampleStdDev(values []int64) float64 {
+ return math.Sqrt(SampleVariance(values))
+}
+
+// SampleSum returns the sum of the slice of int64.
+func SampleSum(values []int64) int64 {
+ var sum int64
+ for _, v := range values {
+ sum += v
+ }
+ return sum
+}
+
+// SampleVariance returns the variance of the slice of int64.
+func SampleVariance(values []int64) float64 {
+ if 0 == len(values) {
+ return 0.0
+ }
+ m := SampleMean(values)
+ var sum float64
+ for _, v := range values {
+ d := float64(v) - m
+ sum += d * d
+ }
+ return sum / float64(len(values))
+}
+
+// A uniform sample using Vitter's Algorithm R.
+type UniformSample struct {
+ count int64
+ mutex sync.Mutex
+ reservoirSize int
+ values []int64
+}
+
+// NewUniformSample constructs a new uniform sample with the given reservoir
+// size.
+func NewUniformSample(reservoirSize int) Sample {
+ if UseNilMetrics {
+ return NilSample{}
+ }
+ return &UniformSample{
+ reservoirSize: reservoirSize,
+ values: make([]int64, 0, reservoirSize),
+ }
+}
+
+// Clear clears all samples.
+func (s *UniformSample) Clear() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count = 0
+ s.values = make([]int64, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *UniformSample) Count() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *UniformSample) Max() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMax(s.values)
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *UniformSample) Mean() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMean(s.values)
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *UniformSample) Min() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMin(s.values)
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *UniformSample) Percentile(p float64) float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *UniformSample) Percentiles(ps []float64) []float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *UniformSample) Size() int {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return len(s.values)
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *UniformSample) Snapshot() Sample {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return &SampleSnapshot{
+ count: s.count,
+ values: values,
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *UniformSample) StdDev() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleStdDev(s.values)
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *UniformSample) Sum() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleSum(s.values)
+}
+
+// Update samples a new value.
+func (s *UniformSample) Update(v int64) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count++
+ if len(s.values) < s.reservoirSize {
+ s.values = append(s.values, v)
+ } else {
+ r := rand.Int63n(s.count)
+ if r < int64(len(s.values)) {
+ s.values[int(r)] = v
+ }
+ }
+}
+
+// Values returns a copy of the values in the sample.
+func (s *UniformSample) Values() []int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *UniformSample) Variance() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleVariance(s.values)
+}
+
+// expDecaySample represents an individual sample in a heap.
+type expDecaySample struct {
+ k float64
+ v int64
+}
+
+func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
+ return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
+}
+
+// expDecaySampleHeap is a min-heap of expDecaySamples.
+// The internal implementation is copied from the standard library's container/heap
+type expDecaySampleHeap struct {
+ s []expDecaySample
+}
+
+func (h *expDecaySampleHeap) Clear() {
+ h.s = h.s[:0]
+}
+
+func (h *expDecaySampleHeap) Push(s expDecaySample) {
+ n := len(h.s)
+ h.s = h.s[0 : n+1]
+ h.s[n] = s
+ h.up(n)
+}
+
+func (h *expDecaySampleHeap) Pop() expDecaySample {
+ n := len(h.s) - 1
+ h.s[0], h.s[n] = h.s[n], h.s[0]
+ h.down(0, n)
+
+ n = len(h.s)
+ s := h.s[n-1]
+ h.s = h.s[0 : n-1]
+ return s
+}
+
+func (h *expDecaySampleHeap) Size() int {
+ return len(h.s)
+}
+
+func (h *expDecaySampleHeap) Values() []expDecaySample {
+ return h.s
+}
+
+func (h *expDecaySampleHeap) up(j int) {
+ for {
+ i := (j - 1) / 2 // parent
+ if i == j || !(h.s[j].k < h.s[i].k) {
+ break
+ }
+ h.s[i], h.s[j] = h.s[j], h.s[i]
+ j = i
+ }
+}
+
+func (h *expDecaySampleHeap) down(i, n int) {
+ for {
+ j1 := 2*i + 1
+ if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
+ break
+ }
+ j := j1 // left child
+ if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
+ j = j2 // = 2*i + 2 // right child
+ }
+ if !(h.s[j].k < h.s[i].k) {
+ break
+ }
+ h.s[i], h.s[j] = h.s[j], h.s[i]
+ i = j
+ }
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
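
The two reservoir strategies above are normally consumed through a Histogram rather than used directly. As a rough sketch only (not part of this diff), assuming the import path as vendored here and arbitrary input values:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Exponentially-decaying sample: biased towards the most recent
	// values (1028 / 0.015 are the same constants NewTimer uses).
	expHist := metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))

	// Uniform sample: every observed value has an equal chance of
	// staying in the reservoir (Vitter's Algorithm R).
	uniHist := metrics.NewHistogram(metrics.NewUniformSample(1028))

	for i := int64(1); i <= 10000; i++ {
		expHist.Update(i)
		uniHist.Update(i)
	}

	fmt.Println("exp-decay p95:", expHist.Percentile(0.95))
	fmt.Println("uniform   p95:", uniHist.Percentile(0.95))
}
```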
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go
new file mode 100644
index 00000000..693f1908
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/syslog.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package metrics
+
+import (
+ "fmt"
+ "log/syslog"
+ "time"
+)
+
+// Output each metric in the given registry to syslog periodically using
+// the given syslogger.
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
+ for _ = range time.Tick(d) {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+ case Gauge:
+ w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+ case GaugeFloat64:
+ w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+ case Healthcheck:
+ metric.Check()
+ w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ w.Info(fmt.Sprintf(
+ "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
+ name,
+ h.Count(),
+ h.Min(),
+ h.Max(),
+ h.Mean(),
+ h.StdDev(),
+ ps[0],
+ ps[1],
+ ps[2],
+ ps[3],
+ ps[4],
+ ))
+ case Meter:
+ m := metric.Snapshot()
+ w.Info(fmt.Sprintf(
+ "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
+ name,
+ m.Count(),
+ m.Rate1(),
+ m.Rate5(),
+ m.Rate15(),
+ m.RateMean(),
+ ))
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ w.Info(fmt.Sprintf(
+ "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
+ name,
+ t.Count(),
+ t.Min(),
+ t.Max(),
+ t.Mean(),
+ t.StdDev(),
+ ps[0],
+ ps[1],
+ ps[2],
+ ps[3],
+ ps[4],
+ t.Rate1(),
+ t.Rate5(),
+ t.Rate15(),
+ t.RateMean(),
+ ))
+ }
+ })
+ }
+}
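
A hedged sketch of wiring this reporter up, assuming a local syslog daemon is available; the facility, tag and one-minute interval are placeholders rather than anything this service configures:

```go
package main

import (
	"log"
	"log/syslog"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Tag and facility are illustrative only.
	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_LOCAL0, "dp-dataset-api")
	if err != nil {
		log.Fatal(err)
	}
	// Blocks forever, logging every registered metric once a minute.
	metrics.Syslog(metrics.DefaultRegistry, time.Minute, w)
}
```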
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
new file mode 100644
index 00000000..17db8f8d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/timer.go
@@ -0,0 +1,311 @@
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
+// Timers capture the duration and rate of events.
+type Timer interface {
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Rate1() float64
+ Rate5() float64
+ Rate15() float64
+ RateMean() float64
+ Snapshot() Timer
+ StdDev() float64
+ Sum() int64
+ Time(func())
+ Update(time.Duration)
+ UpdateSince(time.Time)
+ Variance() float64
+}
+
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
+func GetOrRegisterTimer(name string, r Registry) Timer {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewTimer).(Timer)
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+func NewCustomTimer(h Histogram, m Meter) Timer {
+ if UseNilMetrics {
+ return NilTimer{}
+ }
+ return &StandardTimer{
+ histogram: h,
+ meter: m,
+ }
+}
+
+// NewRegisteredTimer constructs and registers a new StandardTimer.
+func NewRegisteredTimer(name string, r Registry) Timer {
+ c := NewTimer()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+func NewTimer() Timer {
+ if UseNilMetrics {
+ return NilTimer{}
+ }
+ return &StandardTimer{
+ histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
+ meter: NewMeter(),
+ }
+}
+
+// NilTimer is a no-op Timer.
+type NilTimer struct {
+ h Histogram
+ m Meter
+}
+
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilTimer) Snapshot() Timer { return NilTimer{} }
+
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilTimer) Sum() int64 { return 0 }
+
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
+
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
+
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
+// StandardTimer is the standard implementation of a Timer and uses a Histogram
+// and Meter.
+type StandardTimer struct {
+ histogram Histogram
+ meter Meter
+ mutex sync.Mutex
+}
+
+// Count returns the number of events recorded.
+func (t *StandardTimer) Count() int64 {
+ return t.histogram.Count()
+}
+
+// Max returns the maximum value in the sample.
+func (t *StandardTimer) Max() int64 {
+ return t.histogram.Max()
+}
+
+// Mean returns the mean of the values in the sample.
+func (t *StandardTimer) Mean() float64 {
+ return t.histogram.Mean()
+}
+
+// Min returns the minimum value in the sample.
+func (t *StandardTimer) Min() int64 {
+ return t.histogram.Min()
+}
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (t *StandardTimer) Percentile(p float64) float64 {
+ return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (t *StandardTimer) Percentiles(ps []float64) []float64 {
+ return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (t *StandardTimer) Rate1() float64 {
+ return t.meter.Rate1()
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (t *StandardTimer) Rate5() float64 {
+ return t.meter.Rate5()
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (t *StandardTimer) Rate15() float64 {
+ return t.meter.Rate15()
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (t *StandardTimer) RateMean() float64 {
+ return t.meter.RateMean()
+}
+
+// Snapshot returns a read-only copy of the timer.
+func (t *StandardTimer) Snapshot() Timer {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ return &TimerSnapshot{
+ histogram: t.histogram.Snapshot().(*HistogramSnapshot),
+ meter: t.meter.Snapshot().(*MeterSnapshot),
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (t *StandardTimer) StdDev() float64 {
+ return t.histogram.StdDev()
+}
+
+// Sum returns the sum in the sample.
+func (t *StandardTimer) Sum() int64 {
+ return t.histogram.Sum()
+}
+
+// Record the duration of the execution of the given function.
+func (t *StandardTimer) Time(f func()) {
+ ts := time.Now()
+ f()
+ t.Update(time.Since(ts))
+}
+
+// Record the duration of an event.
+func (t *StandardTimer) Update(d time.Duration) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.histogram.Update(int64(d))
+ t.meter.Mark(1)
+}
+
+// Record the duration of an event that started at a time and ends now.
+func (t *StandardTimer) UpdateSince(ts time.Time) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.histogram.Update(int64(time.Since(ts)))
+ t.meter.Mark(1)
+}
+
+// Variance returns the variance of the values in the sample.
+func (t *StandardTimer) Variance() float64 {
+ return t.histogram.Variance()
+}
+
+// TimerSnapshot is a read-only copy of another Timer.
+type TimerSnapshot struct {
+ histogram *HistogramSnapshot
+ meter *MeterSnapshot
+}
+
+// Count returns the number of events recorded at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+
+// Max returns the maximum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+
+// Min returns the minimum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+
+// Percentile returns an arbitrary percentile of sampled values at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) Percentile(p float64) float64 {
+ return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of sampled values at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+ return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
+
+// Snapshot returns the snapshot.
+func (t *TimerSnapshot) Snapshot() Timer { return t }
+
+// StdDev returns the standard deviation of the values at the time the snapshot
+// was taken.
+func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
+
+// Sum returns the sum at the time the snapshot was taken.
+func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+ panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+ panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+ panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
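
For orientation, a minimal sketch of typical Timer usage; the metric name and the simulated work are invented for the example:

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Registers (or fetches) a timer on the default registry.
	t := metrics.GetOrRegisterTimer("example.request.duration", nil)

	// Time wraps a function call and records how long it took.
	t.Time(func() { time.Sleep(25 * time.Millisecond) })

	// Durations can also be recorded from an explicit start time.
	start := time.Now()
	time.Sleep(10 * time.Millisecond)
	t.UpdateSince(start)

	snap := t.Snapshot()
	fmt.Printf("count=%d mean=%.2fms p99=%.2fms\n",
		snap.Count(),
		snap.Mean()/float64(time.Millisecond),
		snap.Percentile(0.99)/float64(time.Millisecond))
}
```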
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
new file mode 100755
index 00000000..f6499982
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/validate.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+# check there are no formatting issues
+GOFMT_LINES=`gofmt -l . | wc -l | xargs`
+test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
+
+# run the tests for the root package
+go test .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 00000000..091e971d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to the
+// given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+ for _ = range time.Tick(d) {
+ WriteOnce(r, w)
+ }
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
+func WriteOnce(r Registry, w io.Writer) {
+ var namedMetrics namedMetricSlice
+ r.Each(func(name string, i interface{}) {
+ namedMetrics = append(namedMetrics, namedMetric{name, i})
+ })
+
+ sort.Sort(namedMetrics)
+ for _, namedMetric := range namedMetrics {
+ switch metric := namedMetric.m.(type) {
+ case Counter:
+ fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", metric.Count())
+ case Gauge:
+ fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+ fmt.Fprintf(w, " value: %9d\n", metric.Value())
+ case GaugeFloat64:
+ fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+ fmt.Fprintf(w, " value: %f\n", metric.Value())
+ case Healthcheck:
+ metric.Check()
+ fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
+ fmt.Fprintf(w, " error: %v\n", metric.Error())
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", h.Count())
+ fmt.Fprintf(w, " min: %9d\n", h.Min())
+ fmt.Fprintf(w, " max: %9d\n", h.Max())
+ fmt.Fprintf(w, " mean: %12.2f\n", h.Mean())
+ fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev())
+ fmt.Fprintf(w, " median: %12.2f\n", ps[0])
+ fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
+ fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
+ fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
+ fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "meter %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", m.Count())
+ fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1())
+ fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
+ fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
+ fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "timer %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", t.Count())
+ fmt.Fprintf(w, " min: %9d\n", t.Min())
+ fmt.Fprintf(w, " max: %9d\n", t.Max())
+ fmt.Fprintf(w, " mean: %12.2f\n", t.Mean())
+ fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev())
+ fmt.Fprintf(w, " median: %12.2f\n", ps[0])
+ fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
+ fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
+ fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
+ fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
+ fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1())
+ fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5())
+ fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15())
+ fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean())
+ }
+ }
+}
+
+type namedMetric struct {
+ name string
+ m interface{}
+}
+
+// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
+type namedMetricSlice []namedMetric
+
+func (nms namedMetricSlice) Len() int { return len(nms) }
+
+func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
+
+func (nms namedMetricSlice) Less(i, j int) bool {
+ return nms[i].name < nms[j].name
+}
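
A short sketch of dumping the default registry with WriteOnce; the counter name is an arbitrary example:

```go
package main

import (
	"os"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	c := metrics.GetOrRegisterCounter("example.events", nil)
	c.Inc(42)

	// One-off, name-sorted dump of every registered metric.
	metrics.WriteOnce(metrics.DefaultRegistry, os.Stdout)

	// metrics.Write(metrics.DefaultRegistry, 5*time.Second, os.Stdout)
	// would instead loop forever, printing on a fixed interval.
}
```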
diff --git a/vendor/github.com/smartystreets/assertions/doc.go b/vendor/github.com/smartystreets/assertions/doc.go
index 50572325..43ceb6e9 100644
--- a/vendor/github.com/smartystreets/assertions/doc.go
+++ b/vendor/github.com/smartystreets/assertions/doc.go
@@ -82,8 +82,6 @@ func (this *Assertion) So(actual interface{}, assert assertion, expected ...inte
// log.Println(message)
// }
//
-// For an alternative implementation of So (that provides more flexible return options)
-// see the `So` function in the package at github.com/smartystreets/assertions/assert.
func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string) {
if result := so(actual, assert, expected...); len(result) == 0 {
return true, result
diff --git a/vendor/github.com/smartystreets/assertions/equal_method.go b/vendor/github.com/smartystreets/assertions/equal_method.go
deleted file mode 100644
index c4fc38fa..00000000
--- a/vendor/github.com/smartystreets/assertions/equal_method.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package assertions
-
-import "reflect"
-
-type equalityMethodSpecification struct {
- a interface{}
- b interface{}
-
- aType reflect.Type
- bType reflect.Type
-
- equalMethod reflect.Value
-}
-
-func newEqualityMethodSpecification(a, b interface{}) *equalityMethodSpecification {
- return &equalityMethodSpecification{
- a: a,
- b: b,
- }
-}
-
-func (this *equalityMethodSpecification) IsSatisfied() bool {
- if !this.bothAreSameType() {
- return false
- }
- if !this.typeHasEqualMethod() {
- return false
- }
- if !this.equalMethodReceivesSameTypeForComparison() {
- return false
- }
- if !this.equalMethodReturnsBool() {
- return false
- }
- return true
-}
-
-func (this *equalityMethodSpecification) bothAreSameType() bool {
- this.aType = reflect.TypeOf(this.a)
- if this.aType == nil {
- return false
- }
- if this.aType.Kind() == reflect.Ptr {
- this.aType = this.aType.Elem()
- }
- this.bType = reflect.TypeOf(this.b)
- return this.aType == this.bType
-}
-func (this *equalityMethodSpecification) typeHasEqualMethod() bool {
- aInstance := reflect.ValueOf(this.a)
- this.equalMethod = aInstance.MethodByName("Equal")
- return this.equalMethod != reflect.Value{}
-}
-
-func (this *equalityMethodSpecification) equalMethodReceivesSameTypeForComparison() bool {
- signature := this.equalMethod.Type()
- return signature.NumIn() == 1 && signature.In(0) == this.aType
-}
-
-func (this *equalityMethodSpecification) equalMethodReturnsBool() bool {
- signature := this.equalMethod.Type()
- return signature.NumOut() == 1 && signature.Out(0) == reflect.TypeOf(true)
-}
-
-func (this *equalityMethodSpecification) AreEqual() bool {
- a := reflect.ValueOf(this.a)
- b := reflect.ValueOf(this.b)
- return areEqual(a, b) && areEqual(b, a)
-}
-func areEqual(receiver reflect.Value, argument reflect.Value) bool {
- equalMethod := receiver.MethodByName("Equal")
- argumentList := []reflect.Value{argument}
- result := equalMethod.Call(argumentList)
- return result[0].Bool()
-}
diff --git a/vendor/github.com/smartystreets/assertions/equality.go b/vendor/github.com/smartystreets/assertions/equality.go
index 0a4b1f3d..1e5957e2 100644
--- a/vendor/github.com/smartystreets/assertions/equality.go
+++ b/vendor/github.com/smartystreets/assertions/equality.go
@@ -11,11 +11,7 @@ import (
"github.com/smartystreets/assertions/internal/oglematchers"
)
-// ShouldEqual receives exactly two parameters and does an equality check
-// using the following semantics:
-// 1. If the expected and actual values implement an Equal method in the form
-// `func (this T) Equal(that T) bool` then call the method. If true, they are equal.
-// 2. The expected and actual values are judged equal or not by oglematchers.Equals.
+// ShouldEqual receives exactly two parameters and does an equality check.
func ShouldEqual(actual interface{}, expected ...interface{}) string {
if message := need(1, expected); message != success {
return message
@@ -26,17 +22,10 @@ func shouldEqual(actual, expected interface{}) (message string) {
defer func() {
if r := recover(); r != nil {
message = serializer.serialize(expected, actual, fmt.Sprintf(shouldHaveBeenEqual, expected, actual))
+ return
}
}()
- if specification := newEqualityMethodSpecification(expected, actual); specification.IsSatisfied() {
- if specification.AreEqual() {
- return success
- } else {
- message = fmt.Sprintf(shouldHaveBeenEqual, expected, actual)
- return serializer.serialize(expected, actual, message)
- }
- }
if matchError := oglematchers.Equals(expected).Matches(actual); matchError != nil {
expectedSyntax := fmt.Sprintf("%v", expected)
actualSyntax := fmt.Sprintf("%v", actual)
@@ -45,14 +34,14 @@ func shouldEqual(actual, expected interface{}) (message string) {
} else {
message = fmt.Sprintf(shouldHaveBeenEqual, expected, actual)
}
- return serializer.serialize(expected, actual, message)
+ message = serializer.serialize(expected, actual, message)
+ return
}
return success
}
// ShouldNotEqual receives exactly two parameters and does an inequality check.
-// See ShouldEqual for details on how equality is determined.
func ShouldNotEqual(actual interface{}, expected ...interface{}) string {
if fail := need(1, expected); fail != success {
return fail
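
For reference, the simplified ShouldEqual/ShouldNotEqual helpers can also be called directly, outside GoConvey; a tiny sketch with arbitrary values (an empty return string means the assertion passed):

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	if msg := assertions.ShouldEqual(1+1, 2); msg != "" {
		fmt.Println("failed:", msg)
	}
	if msg := assertions.ShouldNotEqual("a", "b"); msg != "" {
		fmt.Println("failed:", msg)
	}
}
```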
diff --git a/vendor/github.com/smartystreets/assertions/type.go b/vendor/github.com/smartystreets/assertions/type.go
index d2d1dc86..25e0ed09 100644
--- a/vendor/github.com/smartystreets/assertions/type.go
+++ b/vendor/github.com/smartystreets/assertions/type.go
@@ -14,10 +14,9 @@ func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string {
first := reflect.TypeOf(actual)
second := reflect.TypeOf(expected[0])
- if first != second {
+ if equal := ShouldEqual(first, second); equal != success {
return serializer.serialize(second, first, fmt.Sprintf(shouldHaveBeenA, actual, second, first))
}
-
return success
}
@@ -30,7 +29,7 @@ func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string
first := reflect.TypeOf(actual)
second := reflect.TypeOf(expected[0])
- if (actual == nil && expected[0] == nil) || first == second {
+ if equal := ShouldEqual(first, second); equal == success {
return fmt.Sprintf(shouldNotHaveBeenA, actual, second)
}
return success
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 6da0cc66..96583cbb 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -2,35 +2,103 @@
"comment": "",
"ignore": "test",
"package": [
+ {
+ "checksumSHA1": "WI5P8MbnHHGqzuQje+qFoQSUHgk=",
+ "path": "github.com/ONSdigital/go-ns/avro",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
{
"checksumSHA1": "J9OiPnhPHFPhQ+R+dq6LFj+dTxE=",
"path": "github.com/ONSdigital/go-ns/handlers/requestID",
- "revision": "c45be7d1a146fdaf45f65304f09c7952a9aaf2cb",
- "revisionTime": "2017-12-08T11:11:03Z"
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "d0xkjPw9SKVWK1vDJqvD3HoFbnA=",
+ "path": "github.com/ONSdigital/go-ns/kafka",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
},
{
"checksumSHA1": "AjbjbhFVAOP/1NU5HL+uy+X/yJo=",
"path": "github.com/ONSdigital/go-ns/log",
- "revision": "c45be7d1a146fdaf45f65304f09c7952a9aaf2cb",
- "revisionTime": "2017-12-08T11:11:03Z"
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
},
{
"checksumSHA1": "4TZpshhXxRKFxKy3PIok84P5cvo=",
"path": "github.com/ONSdigital/go-ns/mongo",
- "revision": "c45be7d1a146fdaf45f65304f09c7952a9aaf2cb",
- "revisionTime": "2017-12-08T11:11:03Z"
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
},
{
"checksumSHA1": "GQdzMpAMb42KQQ/GsJFSRU5dj1Y=",
"path": "github.com/ONSdigital/go-ns/server",
- "revision": "c45be7d1a146fdaf45f65304f09c7952a9aaf2cb",
- "revisionTime": "2017-12-08T11:11:03Z"
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "+Jp0tVXfQ1TM8T+oun82oJtME5U=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/Shopify/sarama",
+ "path": "github.com/Shopify/sarama",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "88PtOr8dvh0n877kqDsDVZ73MmY=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/bsm/sarama-cluster",
+ "path": "github.com/bsm/sarama-cluster",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "7UT+dVVfOn8IA79ITFiGNMb2c/4=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/davecgh/go-spew/spew",
+ "path": "github.com/davecgh/go-spew/spew",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "y2Kh4iPlgCPXSGTCcFpzePYdzzg=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/eapache/go-resiliency/breaker",
+ "path": "github.com/eapache/go-resiliency/breaker",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "WHl96RVZlOOdF4Lb1OOadMpw8ls=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/eapache/go-xerial-snappy",
+ "path": "github.com/eapache/go-xerial-snappy",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "oCCs6kDanizatplM5e/hX76busE=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/eapache/queue",
+ "path": "github.com/eapache/queue",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "I6MnUzkLJt+sh73CodD0NKswFrs=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/go-avro/avro",
+ "path": "github.com/go-avro/avro",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "p/8vSviYF91gFflhrt5vkyksroo=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/golang/snappy",
+ "path": "github.com/golang/snappy",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
},
{
"checksumSHA1": "yIkYzW7bzAD81zHyuCNmEj4+oxQ=",
"path": "github.com/gopherjs/gopherjs/js",
- "revision": "2b1d432c8a82c9bff0b0baffaeb3ec6e92974112",
- "revisionTime": "2017-07-02T15:34:43Z"
+ "revision": "444abdf920945de5d4a977b572bcc6c674d1e4eb",
+ "revisionTime": "2017-11-02T03:40:23Z"
},
{
"checksumSHA1": "g/V4qrXjUGG9B+e3hB+4NAYJ5Gs=",
@@ -39,10 +107,10 @@
"revisionTime": "2016-08-17T18:46:32Z"
},
{
- "checksumSHA1": "3HFUqdhX43kNPJe18WJWCVkthKI=",
+ "checksumSHA1": "hIbmXmNbwyP44fi1hh6zJrMcYws=",
"path": "github.com/gorilla/mux",
- "revision": "2d5fef06b891c971b14aa6f71ca5ab6c03a36e0e",
- "revisionTime": "2017-11-09T03:54:02Z"
+ "revision": "ac112f7d75a0714af1bd86ab17749b31f7809640",
+ "revisionTime": "2017-07-03T15:07:09Z"
},
{
"checksumSHA1": "Js/yx9fZ3+wH1wZpHNIxSTMIaCg=",
@@ -54,8 +122,8 @@
"checksumSHA1": "OBvAHqWjdI4NQVAqTkcQAdTuCFY=",
"origin": "github.com/ONSdigital/go-ns/vendor/github.com/justinas/alice",
"path": "github.com/justinas/alice",
- "revision": "c45be7d1a146fdaf45f65304f09c7952a9aaf2cb",
- "revisionTime": "2017-12-08T11:11:03Z"
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
},
{
"checksumSHA1": "Jrjxy16tD9mUgr/jbhXwbHVeSa0=",
@@ -67,8 +135,35 @@
"checksumSHA1": "hBgLmZ/4mCxmnH88mqFKBkpJFUY=",
"origin": "github.com/ONSdigital/go-ns/vendor/github.com/mgutz/ansi",
"path": "github.com/mgutz/ansi",
- "revision": "c45be7d1a146fdaf45f65304f09c7952a9aaf2cb",
- "revisionTime": "2017-12-08T11:11:03Z"
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "FGg99nQ56Fo3radSCuU1AeEUJug=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/pierrec/lz4",
+ "path": "github.com/pierrec/lz4",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "IT4sX58d+e8osXHV5U6YCSdB/uE=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/pierrec/xxHash/xxHash32",
+ "path": "github.com/pierrec/xxHash/xxHash32",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
+ },
+ {
+ "checksumSHA1": "rJab1YdNhQooDiBWNnt7TLWPyBU=",
+ "path": "github.com/pkg/errors",
+ "revision": "c605e284fe17294bda444b34710735b29d1a9d90",
+ "revisionTime": "2017-05-05T04:36:39Z"
+ },
+ {
+ "checksumSHA1": "KAzbLjI9MzW2tjfcAsK75lVRp6I=",
+ "origin": "github.com/ONSdigital/go-ns/vendor/github.com/rcrowley/go-metrics",
+ "path": "github.com/rcrowley/go-metrics",
+ "revision": "6920413b753350672215a083e0f9d5c270a21075",
+ "revisionTime": "2017-11-28T09:28:02Z"
},
{
"checksumSHA1": "zmC8/3V4ls53DJlNTKDZwPSC/dA=",
@@ -77,22 +172,22 @@
"revisionTime": "2017-03-21T23:07:31Z"
},
{
- "checksumSHA1": "ozRFnFdTG0IjkTE0RkZn71XO4gw=",
+ "checksumSHA1": "AikWjAOvWHSJ8G1iU+wNReZnyCk=",
"path": "github.com/smartystreets/assertions",
- "revision": "0b37b35ec7434b77e77a4bb29b79677cced992ea",
- "revisionTime": "2017-09-25T17:21:51Z"
+ "revision": "4ea54c1f28ad3ae597e76607dea3871fa177e263",
+ "revisionTime": "2017-06-07T22:27:57Z"
},
{
"checksumSHA1": "Vzb+dEH/LTYbvr8RXHmt6xJHz04=",
"path": "github.com/smartystreets/assertions/internal/go-render/render",
- "revision": "0b37b35ec7434b77e77a4bb29b79677cced992ea",
- "revisionTime": "2017-09-25T17:21:51Z"
+ "revision": "4ea54c1f28ad3ae597e76607dea3871fa177e263",
+ "revisionTime": "2017-06-07T22:27:57Z"
},
{
"checksumSHA1": "r6FauVdOTFnwYQgrKGFuWUbIAJE=",
"path": "github.com/smartystreets/assertions/internal/oglematchers",
- "revision": "0b37b35ec7434b77e77a4bb29b79677cced992ea",
- "revisionTime": "2017-09-25T17:21:51Z"
+ "revision": "4ea54c1f28ad3ae597e76607dea3871fa177e263",
+ "revisionTime": "2017-06-07T22:27:57Z"
},
{
"checksumSHA1": "f4m09DHEetaanti/GqUJzyCBTaI=",