From 9aa80e8200c0fb6990a48c5eb254e248a2706017 Mon Sep 17 00:00:00 2001 From: Eleanor Deal Date: Fri, 12 Jul 2019 14:43:10 +0100 Subject: [PATCH 01/15] Update vendor to include neptune implementation for dp-graph --- Makefile | 2 +- .../ONSdigital/dp-graph/config/config.go | 13 +- .../dp-graph/graph/driver/errors.go | 4 + .../ONSdigital/dp-graph/graph/graph.go | 7 +- .../ONSdigital/dp-graph/neo4j/codelists.go | 2 +- .../ONSdigital/dp-graph/neo4j/neo4j.go | 2 +- .../neo4j/{driver => neo4jdriver}/driver.go | 2 +- .../{driver => neo4jdriver}/healthcheck.go | 2 +- .../{driver => neo4jdriver}/row_reader.go | 2 +- .../ONSdigital/dp-graph/neo4j/query/query.go | 1 - .../ONSdigital/dp-graph/neptune/codelist.go | 229 ++++ .../dp-graph/neptune/codelistsdataset.go | 165 +++ .../ONSdigital/dp-graph/neptune/dimension.go | 11 + .../dp-graph/neptune/driver/driver.go | 23 + .../dp-graph/neptune/driver/healthcheck.go | 14 + .../dp-graph/neptune/driver/neptunepool.go | 23 + .../ONSdigital/dp-graph/neptune/hierarchy.go | 274 ++++ .../ONSdigital/dp-graph/neptune/instance.go | 39 + .../neptune/internal/mockpoolutils.go | 184 +++ .../dp-graph/neptune/internal/pool.go | 408 ++++++ .../ONSdigital/dp-graph/neptune/mapper.go | 83 ++ .../dp-graph/neptune/mockedneptune.go | 24 + .../ONSdigital/dp-graph/neptune/neptune.go | 258 ++++ .../dp-graph/neptune/observation.go | 62 + .../dp-graph/neptune/query/query.go | 96 ++ .../observation/observationtest/row_reader.go | 59 +- .../ONSdigital/dp-graph/observation/reader.go | 6 +- .../gremgo-neptune/Dockerfile.gremlin | 1 + .../ONSdigital/gremgo-neptune/LICENSE.md | 9 + .../ONSdigital/gremgo-neptune/Makefile | 21 + .../ONSdigital/gremgo-neptune/README.md | 41 + .../ONSdigital/gremgo-neptune/TODO.md | 8 + .../ONSdigital/gremgo-neptune/client.go | 501 +++++++ .../gremgo-neptune/configuration.go | 42 + .../ONSdigital/gremgo-neptune/connection.go | 270 ++++ .../ONSdigital/gremgo-neptune/cursor.go | 85 ++ .../ONSdigital/gremgo-neptune/go.mod | 9 + 
.../ONSdigital/gremgo-neptune/go.sum | 21 + .../ONSdigital/gremgo-neptune/pool.go | 524 ++++++++ .../ONSdigital/gremgo-neptune/request.go | 96 ++ .../ONSdigital/gremgo-neptune/response.go | 231 ++++ .../ONSdigital/gremgo-neptune/tags.go | 42 + .../github.com/gedge/graphson/deserialize.go | 246 ++++ vendor/github.com/gedge/graphson/types.go | 153 +++ vendor/github.com/gedge/graphson/utils.go | 238 ++++ .../gedge/graphson/validation_utils.go | 94 ++ vendor/github.com/gofrs/uuid/LICENSE | 20 + vendor/github.com/gofrs/uuid/README.md | 109 ++ vendor/github.com/gofrs/uuid/codec.go | 212 +++ vendor/github.com/gofrs/uuid/fuzz.go | 47 + vendor/github.com/gofrs/uuid/generator.go | 299 +++++ vendor/github.com/gofrs/uuid/sql.go | 109 ++ vendor/github.com/gofrs/uuid/uuid.go | 250 ++++ vendor/github.com/gorilla/websocket/AUTHORS | 9 + vendor/github.com/gorilla/websocket/LICENSE | 22 + vendor/github.com/gorilla/websocket/README.md | 64 + vendor/github.com/gorilla/websocket/client.go | 395 ++++++ .../gorilla/websocket/client_clone.go | 16 + .../gorilla/websocket/client_clone_legacy.go | 38 + .../gorilla/websocket/compression.go | 148 +++ vendor/github.com/gorilla/websocket/conn.go | 1163 +++++++++++++++++ .../gorilla/websocket/conn_write.go | 15 + .../gorilla/websocket/conn_write_legacy.go | 18 + vendor/github.com/gorilla/websocket/doc.go | 227 ++++ vendor/github.com/gorilla/websocket/go.mod | 1 + vendor/github.com/gorilla/websocket/go.sum | 2 + vendor/github.com/gorilla/websocket/join.go | 42 + vendor/github.com/gorilla/websocket/json.go | 60 + vendor/github.com/gorilla/websocket/mask.go | 54 + .../github.com/gorilla/websocket/mask_safe.go | 15 + .../github.com/gorilla/websocket/prepared.go | 102 ++ vendor/github.com/gorilla/websocket/proxy.go | 77 ++ 
vendor/github.com/gorilla/websocket/server.go | 363 +++++ vendor/github.com/gorilla/websocket/trace.go | 19 + .../github.com/gorilla/websocket/trace_17.go | 12 + vendor/github.com/gorilla/websocket/util.go | 283 ++++ .../gorilla/websocket/x_net_proxy.go | 473 +++++++ vendor/vendor.json | 110 +- 78 files changed, 9327 insertions(+), 74 deletions(-) rename vendor/github.com/ONSdigital/dp-graph/neo4j/{driver => neo4jdriver}/driver.go (99%) rename vendor/github.com/ONSdigital/dp-graph/neo4j/{driver => neo4jdriver}/healthcheck.go (95%) rename vendor/github.com/ONSdigital/dp-graph/neo4j/{driver => neo4jdriver}/row_reader.go (98%) create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/codelist.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/codelistsdataset.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/driver/driver.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/driver/healthcheck.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/instance.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/mockedneptune.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go create mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/observation.go create mode 100644 
vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/Dockerfile.gremlin create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/LICENSE.md create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/Makefile create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/README.md create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/TODO.md create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/client.go create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/configuration.go create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/connection.go create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/cursor.go create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/go.mod create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/go.sum create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/pool.go create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/request.go create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/response.go create mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/tags.go create mode 100644 vendor/github.com/gedge/graphson/deserialize.go create mode 100644 vendor/github.com/gedge/graphson/types.go create mode 100644 vendor/github.com/gedge/graphson/utils.go create mode 100644 vendor/github.com/gedge/graphson/validation_utils.go create mode 100644 vendor/github.com/gofrs/uuid/LICENSE create mode 100644 vendor/github.com/gofrs/uuid/README.md create mode 100644 vendor/github.com/gofrs/uuid/codec.go create mode 100644 vendor/github.com/gofrs/uuid/fuzz.go create mode 100644 vendor/github.com/gofrs/uuid/generator.go create mode 100644 vendor/github.com/gofrs/uuid/sql.go create mode 100644 
vendor/github.com/gofrs/uuid/uuid.go create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS create mode 100644 vendor/github.com/gorilla/websocket/LICENSE create mode 100644 vendor/github.com/gorilla/websocket/README.md create mode 100644 vendor/github.com/gorilla/websocket/client.go create mode 100644 vendor/github.com/gorilla/websocket/client_clone.go create mode 100644 vendor/github.com/gorilla/websocket/client_clone_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/compression.go create mode 100644 vendor/github.com/gorilla/websocket/conn.go create mode 100644 vendor/github.com/gorilla/websocket/conn_write.go create mode 100644 vendor/github.com/gorilla/websocket/conn_write_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/doc.go create mode 100644 vendor/github.com/gorilla/websocket/go.mod create mode 100644 vendor/github.com/gorilla/websocket/go.sum create mode 100644 vendor/github.com/gorilla/websocket/join.go create mode 100644 vendor/github.com/gorilla/websocket/json.go create mode 100644 vendor/github.com/gorilla/websocket/mask.go create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go create mode 100644 vendor/github.com/gorilla/websocket/prepared.go create mode 100644 vendor/github.com/gorilla/websocket/proxy.go create mode 100644 vendor/github.com/gorilla/websocket/server.go create mode 100644 vendor/github.com/gorilla/websocket/trace.go create mode 100644 vendor/github.com/gorilla/websocket/trace_17.go create mode 100644 vendor/github.com/gorilla/websocket/util.go create mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go diff --git a/Makefile b/Makefile index 0767559f..51f98f8d 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ build: @mkdir -p $(BUILD_ARCH)/$(BIN_DIR) go build -o 
$(BUILD_ARCH)/$(BIN_DIR)/dp-dataset-api main.go debug: - GRAPH_DRIVER_TYPE="neo4j" GRAPH_ADDR="bolt://localhost:7687" HUMAN_LOG=1 go run main.go + GRAPH_DRIVER_TYPE="neptune" GRAPH_ADDR="ws://localhost:8182/gremlin" HUMAN_LOG=1 go run main.go acceptance-publishing: build ENABLE_PRIVATE_ENDPOINTS=true MONGODB_DATABASE=test HUMAN_LOG=1 go run main.go acceptance-web: build diff --git a/vendor/github.com/ONSdigital/dp-graph/config/config.go b/vendor/github.com/ONSdigital/dp-graph/config/config.go index 4fe8af60..99594a25 100644 --- a/vendor/github.com/ONSdigital/dp-graph/config/config.go +++ b/vendor/github.com/ONSdigital/dp-graph/config/config.go @@ -6,6 +6,7 @@ import ( "github.com/ONSdigital/dp-graph/graph/driver" "github.com/ONSdigital/dp-graph/mock" "github.com/ONSdigital/dp-graph/neo4j" + "github.com/ONSdigital/dp-graph/neptune" "github.com/kelseyhightower/envconfig" ) @@ -16,7 +17,7 @@ type Configuration struct { DatabaseAddress string `envconfig:"GRAPH_ADDR"` PoolSize int `envconfig:"GRAPH_POOL_SIZE"` MaxRetries int `envconfig:"MAX_RETRIES"` - QueryTimeout int `envconfig:"QUERY_TIMEOUT"` + QueryTimeout int `envconfig:"GRAPH_QUERY_TIMEOUT"` Driver driver.Driver } @@ -24,7 +25,7 @@ type Configuration struct { var cfg *Configuration // Get reads config and returns the configured instantiated driver -func Get() (*Configuration, error) { +func Get(errs chan error) (*Configuration, error) { if cfg != nil { return cfg, nil } @@ -43,9 +44,11 @@ func Get() (*Configuration, error) { if err != nil { return nil, err } - // - // case "gremgo": - // d = gremgo.Driver{} + case "neptune": + d, err = neptune.New(cfg.DatabaseAddress, cfg.PoolSize, cfg.QueryTimeout, cfg.MaxRetries, errs) + if err != nil { + return nil, err + } case "mock": d = &mock.Mock{} default: diff --git a/vendor/github.com/ONSdigital/dp-graph/graph/driver/errors.go 
b/vendor/github.com/ONSdigital/dp-graph/graph/driver/errors.go index 75cfd714..5bc2e1e3 100644 --- a/vendor/github.com/ONSdigital/dp-graph/graph/driver/errors.go +++ b/vendor/github.com/ONSdigital/dp-graph/graph/driver/errors.go @@ -8,6 +8,10 @@ import ( // ErrNotFound is returned when the result set from the database held 0 records var ErrNotFound = errors.New("not found") +// ErrMultipleFound is returned when the result set from the database holds +// more than one error, inside a call that requires exactly one. +var ErrMultipleFound = errors.New("multiple found where should be one") + // ErrAttemptsExceededLimit is returned when the number of attempts has reaced // the maximum permitted type ErrAttemptsExceededLimit struct { diff --git a/vendor/github.com/ONSdigital/dp-graph/graph/graph.go b/vendor/github.com/ONSdigital/dp-graph/graph/graph.go index e2d313a5..6cbf4d39 100644 --- a/vendor/github.com/ONSdigital/dp-graph/graph/graph.go +++ b/vendor/github.com/ONSdigital/dp-graph/graph/graph.go @@ -19,6 +19,8 @@ type DB struct { driver.Instance driver.Observation driver.Dimension + + Errors chan error } // Subsets allows a clear and concise way of requesting any combination of @@ -59,7 +61,9 @@ func NewDimensionStore(ctx context.Context) (*DB, error) { // New DB returned according to provided subsets and the environment config // satisfying the interfaces requested by the choice of subsets func New(ctx context.Context, choice Subsets) (*DB, error) { - cfg, err := config.Get() + errs := make(chan error) + + cfg, err := config.Get(errs) if err != nil { return nil, err } @@ -107,6 +111,7 @@ func New(ctx context.Context, choice Subsets) (*DB, error) { instance, observation, dimension, + errs, }, nil } diff --git a/vendor/github.com/ONSdigital/dp-graph/neo4j/codelists.go b/vendor/github.com/ONSdigital/dp-graph/neo4j/codelists.go index 93db7e81..4b65a6f7 100644 --- 
a/vendor/github.com/ONSdigital/dp-graph/neo4j/codelists.go +++ b/vendor/github.com/ONSdigital/dp-graph/neo4j/codelists.go @@ -36,7 +36,7 @@ func (n *Neo4j) GetCodeLists(ctx context.Context, filterBy string) (*models.Code func (n *Neo4j) GetCodeList(ctx context.Context, code string) (*models.CodeList, error) { log.InfoCtx(ctx, "about to query neo4j for code list", log.Data{"code_list_id": code}) - query := fmt.Sprintf(query.CodeListExists, code) + query := fmt.Sprintf(query.GetCodeList, code) codeListResult := &models.CodeList{} if err := n.Read(query, mapper.CodeList(codeListResult, code), true); err != nil { diff --git a/vendor/github.com/ONSdigital/dp-graph/neo4j/neo4j.go b/vendor/github.com/ONSdigital/dp-graph/neo4j/neo4j.go index 1711b9d2..0c36b5c7 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neo4j/neo4j.go +++ b/vendor/github.com/ONSdigital/dp-graph/neo4j/neo4j.go @@ -7,7 +7,7 @@ import ( "time" graph "github.com/ONSdigital/dp-graph/graph/driver" - "github.com/ONSdigital/dp-graph/neo4j/driver" + driver "github.com/ONSdigital/dp-graph/neo4j/neo4jdriver" "github.com/ONSdigital/go-ns/log" neoErrors "github.com/ONSdigital/golang-neo4j-bolt-driver/errors" "github.com/ONSdigital/golang-neo4j-bolt-driver/structures/messages" diff --git a/vendor/github.com/ONSdigital/dp-graph/neo4j/driver/driver.go b/vendor/github.com/ONSdigital/dp-graph/neo4j/neo4jdriver/driver.go similarity index 99% rename from vendor/github.com/ONSdigital/dp-graph/neo4j/driver/driver.go rename to vendor/github.com/ONSdigital/dp-graph/neo4j/neo4jdriver/driver.go index 236e7fe7..10bf51c6 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neo4j/driver/driver.go +++ b/vendor/github.com/ONSdigital/dp-graph/neo4j/neo4jdriver/driver.go @@ -1,4 +1,4 @@ -package driver +package neo4jdriver import ( "context" diff --git 
a/vendor/github.com/ONSdigital/dp-graph/neo4j/driver/healthcheck.go b/vendor/github.com/ONSdigital/dp-graph/neo4j/neo4jdriver/healthcheck.go similarity index 95% rename from vendor/github.com/ONSdigital/dp-graph/neo4j/driver/healthcheck.go rename to vendor/github.com/ONSdigital/dp-graph/neo4j/neo4jdriver/healthcheck.go index 369f870e..2f7ed418 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neo4j/driver/healthcheck.go +++ b/vendor/github.com/ONSdigital/dp-graph/neo4j/neo4jdriver/healthcheck.go @@ -1,4 +1,4 @@ -package driver +package neo4jdriver const serviceName = "neo4j" const pingStmt = "MATCH (i) RETURN i LIMIT 1" diff --git a/vendor/github.com/ONSdigital/dp-graph/neo4j/driver/row_reader.go b/vendor/github.com/ONSdigital/dp-graph/neo4j/neo4jdriver/row_reader.go similarity index 98% rename from vendor/github.com/ONSdigital/dp-graph/neo4j/driver/row_reader.go rename to vendor/github.com/ONSdigital/dp-graph/neo4j/neo4jdriver/row_reader.go index 67694b32..10c530e0 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neo4j/driver/row_reader.go +++ b/vendor/github.com/ONSdigital/dp-graph/neo4j/neo4jdriver/row_reader.go @@ -1,4 +1,4 @@ -package driver +package neo4jdriver import ( "context" diff --git a/vendor/github.com/ONSdigital/dp-graph/neo4j/query/query.go b/vendor/github.com/ONSdigital/dp-graph/neo4j/query/query.go index d963aedc..6a3a0a86 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neo4j/query/query.go +++ b/vendor/github.com/ONSdigital/dp-graph/neo4j/query/query.go @@ -4,7 +4,6 @@ const ( // codelists GetCodeLists = "MATCH (i) WHERE i:_code_list%s RETURN distinct labels(i) as labels" GetCodeList = "MATCH (i:_code_list:`_code_list_%s`) RETURN i" - CodeListExists = "MATCH (cl:_code_list:`_code_list_%s`) RETURN count(*)" GetCodeListEdition = "MATCH (i:_code_list:`_code_list_%s` {edition:" + `"%s"` + "}) RETURN i" CountEditions = 
"MATCH (cl:_code_list:`_code_list_%s`) WHERE cl.edition = %q RETURN count(*)" GetCodes = "MATCH (c:_code) -[r:usedBy]->(cl:_code_list: `_code_list_%s`) WHERE cl.edition = %q RETURN c, r" diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/codelist.go b/vendor/github.com/ONSdigital/dp-graph/neptune/codelist.go new file mode 100644 index 00000000..6804a014 --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/codelist.go @@ -0,0 +1,229 @@ +/* +This module, when combined with codelistdataset.go, provides code that +satisfies the graph.driver.CodeList interface using Gremlin queries into +a Neptune database. +*/ + +package neptune + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + + "github.com/ONSdigital/dp-code-list-api/models" + "github.com/ONSdigital/dp-graph/graph/driver" + "github.com/ONSdigital/dp-graph/neptune/query" +) + +/* +GetCodeLists provides a list of either all Code Lists, or a list of only those +having a boolean property with the name which is set to true. E.g. +"geography": true. The caller is expected to +fully qualify the embedded Links field afterwards. It returns an error if: +- The Gremlin query failed to execute. +- A CodeList is encountered that does not have *listID* property. +*/ +func (n *NeptuneDB) GetCodeLists(ctx context.Context, filterBy string) (*models.CodeListResults, error) { + // Use differing Gremlin queries - depending on if a filterBy string is specified. 
+ var qry string + if filterBy == "" { + qry = fmt.Sprintf(query.GetCodeLists) + } else { + qry = fmt.Sprintf(query.GetCodeListsFiltered, filterBy) + } + codeListVertices, err := n.getVertices(qry) + if err != nil { + return nil, errors.Wrapf(err, "Gremlin query failed: %q", qry) + } + results := &models.CodeListResults{ + Count: len(codeListVertices), + Limit: len(codeListVertices), + TotalCount: len(codeListVertices), + } + for _, codeListVertex := range codeListVertices { + codeListID, err := codeListVertex.GetProperty("listID") + if err != nil { + return nil, errors.Wrapf(err, `Error reading "listID" property on Code List vertex`) + } + link := &models.CodeListLink{Self: &models.Link{ID: codeListID}} + codeListMdl := models.CodeList{codeListID, link} + results.Items = append(results.Items, codeListMdl) + } + return results, nil +} + +// GetCodeList provides a CodeList for a given ID (e.g. "ashe-earnings"), +// having checked it exists +// in the database. Nb. The caller is expected to fully qualify the embedded +// Links field afterwards. It returns an error if: +// - The Gremlin query failed to execute. +// - The requested CodeList does not exist. (error is `ErrNotFound`) +// - Duplicate CodeLists exist with the given ID (error is `ErrMultipleFound`) +func (n *NeptuneDB) GetCodeList(ctx context.Context, codeListID string) ( + *models.CodeList, error) { + existsQry := fmt.Sprintf(query.CodeListExists, codeListID) + count, err := n.getNumber(existsQry) + if err != nil { + return nil, errors.Wrapf(err, "Gremlin query failed: %q", existsQry) + } + if count == 0 { + return nil, driver.ErrNotFound + } + if count > 1 { + return nil, driver.ErrMultipleFound + } + + return &models.CodeList{ + Links: &models.CodeListLink{ + Self: &models.Link{ + ID: codeListID, + }, + }, + }, nil +} + +/* +GetEditions provides a models.Editions structure populated based on the +the values in the Code List vertices in the database, that have the provided +codeListId. 
+It returns an error if: +- The Gremlin query failed to execute. (wrapped error) +- No CodeLists are found of the requested codeListID (error is ErrNotFound') +- A CodeList is found that does not have the "edition" property (error is 'ErrNoSuchProperty') +*/ +func (n *NeptuneDB) GetEditions(ctx context.Context, codeListID string) (*models.Editions, error) { + qry := fmt.Sprintf(query.GetCodeList, codeListID) + codeLists, err := n.getVertices(qry) + if err != nil { + return nil, errors.Wrapf(err, "Gremlin query failed: %q", qry) + } + if len(codeLists) == 0 { + return nil, driver.ErrNotFound + } + editions := &models.Editions{ + Count: len(codeLists), + Offset: 0, + Limit: len(codeLists), + TotalCount: len(codeLists), + Items: []models.Edition{}, + } + for _, codeList := range codeLists { + editionString, err := codeList.GetProperty("edition") + if err != nil { + return nil, errors.Wrapf(err, `Error reading "edition" property on Code List vertex`) + } + edition := models.Edition{ + Links: &models.EditionLinks{ + Self: &models.Link{ + ID: editionString, + }, + }, + } + editions.Items = append(editions.Items, edition) + } + return editions, nil +} + +/* +GetEdition provides an Edition structure for the code list in the database that +has both the given codeListID (e.g. "ashed-earnings"), and the given edition string +(e.g. "one-off"). +Nb. The caller is expected to fully qualify the embedded Links field +afterwards. +It returns an error if: +- The Gremlin query failed to execute. 
(wrapped error) +- No CodeLists exist with the requested codeListID (error is `ErrNotFound`) +- A CodeList is found that does not have the "edition" property (error is 'ErrNoSuchProperty') +- More than one CodeList exists with the requested ID AND edition (error is `ErrMultipleFound`) +*/ +func (n *NeptuneDB) GetEdition(ctx context.Context, codeListID, edition string) (*models.Edition, error) { + qry := fmt.Sprintf(query.CodeListEditionExists, codeListID, edition) + nFound, err := n.getNumber(qry) + if err != nil { + return nil, errors.Wrapf(err, "Gremlin query failed: %q", qry) + } + if nFound == 0 { + return nil, driver.ErrNotFound + } + if nFound > 1 { + return nil, driver.ErrMultipleFound + } + // What we return (having performed the checks above), is actually hard-coded, as a function of the + // method parameters. + return &models.Edition{Links: &models.EditionLinks{Self: &models.Link{ID: edition}}}, nil +} + +/* +GetCodes provides a list of Code(s) packaged into a models.CodeResults structure that has been populated by +a database query that finds the Code List nodes of the required codeListID (e.g. "ashe-earnings"), and the +required edition (e.g. "one-off"), and then harvests the Code nodes that are known to be "usedBy" that +Code List. It raises a wrapped error if the database raises a non-transient error, (e.g. malformed +query). It raises driver.ErrNotFound if the graph traversal above produces an empty list of codes - +including the case of a short-circuit early termination of the query, because no such qualifying code +list exists. It returns a wrapped error if a Code is found that does not have a "value" property. 
+*/ +func (n *NeptuneDB) GetCodes(ctx context.Context, codeListID, edition string) (*models.CodeResults, error) { + qry := fmt.Sprintf(query.GetCodes, codeListID, edition) + codeResponses, err := n.getVertices(qry) + if err != nil { + return nil, errors.Wrapf(err, "Gremlin query failed: %q", qry) + } + if len(codeResponses) == 0 { + return nil, driver.ErrNotFound + } + codeResults := &models.CodeResults{ + Count: len(codeResponses), + Offset: 0, + Limit: len(codeResponses), + TotalCount: len(codeResponses), + Items: []models.Code{}, + } + + for _, codeResponse := range codeResponses { + codeValue, err := codeResponse.GetProperty("value") + if err != nil { + return nil, errors.Wrapf(err, `Error reading "value" property on Code vertex`) + } + codeItem := models.Code{ + Links: &models.CodeLinks{ + Self: &models.Link{ + ID: codeValue, + }, + }, + } + codeResults.Items = append(codeResults.Items, codeItem) + } + return codeResults, nil +} + +/* +GetCode provides a Code struct to represent the requested code list, edition and code string. +E.g. ashe-earnings|one-off|hourly-pay-gross. +It doesn't need to access the database to form the response, but does so to validate the +query. Specifically it can return errors as follows: +- The Gremlin query failed to execute. +- The query parameter values do not successfully navigate to a Code node. 
(error is `ErrNotFound`) +- Duplicate Code(s) exist that satisfy the search criteria (error is `ErrMultipleFound`) +*/ +func (n *NeptuneDB) GetCode(ctx context.Context, codeListID, edition string, code string) (*models.Code, error) { + qry := fmt.Sprintf(query.CodeExists, codeListID, edition, code) + nFound, err := n.getNumber(qry) + if err != nil { + return nil, errors.Wrapf(err, "Gremlin query failed: %q", qry) + } + if nFound == 0 { + return nil, driver.ErrNotFound + } + if nFound > 1 { + return nil, driver.ErrMultipleFound + } + return &models.Code{ + Links: &models.CodeLinks{ + Self: &models.Link{ + ID: code, + }, + }, + }, nil +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/codelistsdataset.go b/vendor/github.com/ONSdigital/dp-graph/neptune/codelistsdataset.go new file mode 100644 index 00000000..e13d7d08 --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/codelistsdataset.go @@ -0,0 +1,165 @@ +/* +This module, when combined with codelist.go, provides code that +satisfies the graph.driver.CodeList interface using Gremlin queries into +a Neptune database. + +It is dedicated to code to satisfy the GetCodeDatasets() method - +which is sufficiently complex to merit a module (and tests) of its own. +*/ +package neptune + +import ( + "context" + "fmt" + "strconv" + + "github.com/pkg/errors" + + "github.com/ONSdigital/dp-code-list-api/models" + "github.com/ONSdigital/dp-graph/neptune/query" +) + +/* +GetCodeDatasets searches the database for datasets that are associated with +the given code list, code, and code list edition. Specifically those that +satisfy all of: + 1) code lists that match the requested code list ID. + 2) code lists of the requested edition. + 3) codes that match the requested code value. + 4) datasets that are related to qualifying codes by *inDataset* edges. + 5) datasets that have the *isPublished* state true. 
+ +Each such result from the database (potentially) has the properties: + - dimensionName (what the dataset calls this dimension) + - datasetEdition + - version + +The results however include all permuations of dimensionName and +datasetEdition - BUT ONLY CITES the most recent dataset *version* of those +found for that permuation. + +*/ +func (n *NeptuneDB) GetCodeDatasets(ctx context.Context, codeListID, edition string, code string) (*models.Datasets, error) { + + // Emit the query and parse the responses. + qry := fmt.Sprintf(query.GetCodeDatasets, codeListID, edition, code) + responses, err := n.getStringList(qry) + if err != nil { + return nil, errors.Wrapf(err, "Gremlin GetCodeDatasets failed: %q", qry) + } + + // Isolate the individual records from the flattened response. + // [['dim', 'edition', 'version', 'datasetID'], ['dim', 'edition', ...]] + responseRecords, err := createRecords(responses) + if err != nil { + return nil, errors.Wrap(err, "Cannot create records.") + } + + // Build datastructure to capture only latest dataset versions. + latestVersionMaps, err := buildLatestVersionMaps(responseRecords) + if err != nil { + return nil, errors.Wrap(err, "Cannot isolate latest versions.") + } + + // Package up the model-ised response. + response := buildResponse(latestVersionMaps, code, codeListID) + return response, nil +} + +/* +createRecords splits a list of strings into clumps of 4 +*/ +func createRecords(responses []string) ([][]string, error) { + var responseRecords = [][]string{} + const stride = 4 // I.e. 
dimesionName, edition, version, datasetID + if len(responses)%stride != 0 { + return nil, errors.New("List length is not divisible by 4") + } + for i := 0; i < len(responses); i += stride { + dimensionName := responses[i+0] + datasetEdition := responses[i+1] + versionStr := responses[i+2] + datasetID := responses[i+3] + responseRecords = append(responseRecords, []string{dimensionName, datasetEdition, versionStr, datasetID}) + } + return responseRecords, nil +} + +// These (nested) maps track the latest version cited by any combination +// of dimensionName, dataset edition, and datasetID. +// They are all keyed on strings and the nested assembly can be accessed +// like this: +// latestVersion = foo[datasetID][dimension][edition] + +type editionToLatestVersion map[string]int +type dim2Edition map[string]editionToLatestVersion +type datasetID2Dim map[string]dim2Edition + +/* +buildLatestVersionMaps consumes a list of records such as +["dimName1", "datasetEdition1", "version4", "datasetID3"], and builds a datasetID2Dim +structure based on the latest versions available for each combination of +dimension name, dataset edition, and datasetID. 
+*/ +func buildLatestVersionMaps(responseRecords [][]string) (datasetID2Dim, error) { + did2Dim := datasetID2Dim{} + + for _, record := range responseRecords { + dimensionName := record[0] + datasetEdition := record[1] + versionStr := record[2] + datasetID := record[3] + + versionInt, err := strconv.Atoi(versionStr) + if err != nil { + return nil, errors.Wrapf(err, "Cannot cast version (%q) to int", versionStr) + } + if _, ok := did2Dim[datasetID]; !ok { + did2Dim[datasetID] = dim2Edition{} + } + if _, ok := did2Dim[datasetID][dimensionName]; !ok { + did2Dim[datasetID][dimensionName] = editionToLatestVersion{} + } + latestKnownV, ok := did2Dim[datasetID][dimensionName][datasetEdition] + if !ok || latestKnownV < versionInt { + did2Dim[datasetID][dimensionName][datasetEdition] = versionInt + } + } + return did2Dim, nil +} + +/* +buildResponse is capable of consuming a datasetID2Dim data structure, along +with a few other query parameters, and from these, building the data +structure model hierchy required by the GetCodeDatasets API method. 
+*/ +func buildResponse(did2Dim datasetID2Dim, code string, codeListID string) *models.Datasets { + datasets := &models.Datasets{ + Items: []models.Dataset{}, + Count: len(did2Dim), + Limit: len(did2Dim), + TotalCount: len(did2Dim), + } + for datasetID, dim2E := range did2Dim { + for dimensionName, e2v := range dim2E { + datasetLinks := &models.DatasetLinks{Self: &models.Link{ID: datasetID}} + dataset := models.Dataset{ + Links: datasetLinks, + DimensionLabel: dimensionName, + Editions: []models.DatasetEdition{}, + } + for datasetEdition, version := range e2v { + versionStr := fmt.Sprintf("%d", version) + edition := models.DatasetEdition{} + edition.Links = &models.DatasetEditionLinks{ + Self: &models.Link{ID: datasetEdition}, + LatestVersion: &models.Link{ID: versionStr}, + DatasetDimension: &models.Link{ID: codeListID}, + } + dataset.Editions = append(dataset.Editions, edition) + } + datasets.Items = append(datasets.Items, dataset) + } + } + return datasets +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go b/vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go new file mode 100644 index 00000000..c76ef473 --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go @@ -0,0 +1,11 @@ +package neptune + +import ( + "context" + + "github.com/ONSdigital/dp-dimension-importer/model" +) + +func (n *NeptuneDB) InsertDimension(ctx context.Context, cache map[string]string, i *model.Instance, d *model.Dimension) (*model.Dimension, error) { + return nil, nil +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/driver.go b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/driver.go new file mode 100644 index 00000000..d3fd7426 --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/driver.go @@ -0,0 +1,23 @@ +package driver + +import ( + "context" + + gremgo "github.com/ONSdigital/gremgo-neptune" +) + +type NeptuneDriver struct { 
+ Pool NeptunePool // Defined with an interface to support mocking. +} + +func New(ctx context.Context, dbAddr string, errs chan error) (*NeptuneDriver, error) { + pool := gremgo.NewPoolWithDialerCtx(ctx, dbAddr, errs) + return &NeptuneDriver{ + Pool: pool, + }, nil +} + +func (n *NeptuneDriver) Close(ctx context.Context) error { + n.Pool.Close() + return nil +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/healthcheck.go b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/healthcheck.go new file mode 100644 index 00000000..7c060706 --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/healthcheck.go @@ -0,0 +1,14 @@ +package driver + +const ( + serviceName = "neptune" + pingStmt = "g.V().limit(1)" +) + +// Healthcheck calls neptune to check its health status +func (n *NeptuneDriver) Healthcheck() (s string, err error) { + if _, err = n.Pool.Get(pingStmt, nil, nil); err != nil { + return serviceName, err + } + return serviceName, nil +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go new file mode 100644 index 00000000..25596b99 --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go @@ -0,0 +1,23 @@ +package driver + +import ( + "context" + + gremgo "github.com/ONSdigital/gremgo-neptune" +) + +//go:generate moq -out ../internal/pool.go -pkg internal . NeptunePool + +/* +NeptunePool defines the contract required of the gremgo +connection Pool by the Neptune.Driver. 
+*/ +type NeptunePool interface { + Close() + Execute(query string, bindings, rebindings map[string]string) (resp []gremgo.Response, err error) + Get(query string, bindings, rebindings map[string]string) (resp interface{}, err error) + GetCount(q string, bindings, rebindings map[string]string) (i int64, err error) + GetE(q string, bindings, rebindings map[string]string) (resp interface{}, err error) + OpenCursorCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (cursor *gremgo.Cursor, err error) + GetStringList(query string, bindings, rebindings map[string]string) (vals []string, err error) +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go b/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go new file mode 100644 index 00000000..6617bed0 --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go @@ -0,0 +1,274 @@ +package neptune + +import ( + "context" + "fmt" + + "github.com/ONSdigital/dp-graph/neptune/query" + "github.com/ONSdigital/dp-hierarchy-api/models" + "github.com/ONSdigital/go-ns/log" + "github.com/gedge/graphson" +) + +func (n *NeptuneDB) CreateInstanceHierarchyConstraints(ctx context.Context, attempt int, instanceID, dimensionName string) error { + return nil +} + +func (n *NeptuneDB) CloneNodes(ctx context.Context, attempt int, instanceID, codeListID, dimensionName string) (err error) { + gremStmt := fmt.Sprintf( + query.CloneHierarchyNodes, + codeListID, + instanceID, + dimensionName, + codeListID, + ) + logData := log.Data{"fn": "CloneNodes", + "gremlin": gremStmt, + "instance_id": instanceID, + "code_list_id": codeListID, + "dimension_name": dimensionName, + } + log.Debug("cloning nodes from the generic hierarchy", logData) + + if _, err = n.getVertices(gremStmt); err != nil { + log.ErrorC("get", err, logData) + return + } + + return +} + +func (n *NeptuneDB) CountNodes(ctx context.Context, instanceID, 
dimensionName string) (count int64, err error) { + gremStmt := fmt.Sprintf(query.CountHierarchyNodes, instanceID, dimensionName) + logData := log.Data{ + "fn": "CountNodes", + "gremlin": gremStmt, + "instance_id": instanceID, + "dimension_name": dimensionName, + } + log.Debug("counting nodes in the new instance hierarchy", logData) + + if count, err = n.getNumber(gremStmt); err != nil { + log.ErrorC("getNumber", err, logData) + return + } + return +} + +func (n *NeptuneDB) CloneRelationships(ctx context.Context, attempt int, instanceID, codeListID, dimensionName string) (err error) { + gremStmt := fmt.Sprintf( + query.CloneHierarchyRelationships, + codeListID, + instanceID, + dimensionName, + instanceID, + dimensionName, + ) + logData := log.Data{ + "fn": "CloneRelationships", + "instance_id": instanceID, + "code_list_id": codeListID, + "dimension_name": dimensionName, + "gremlin": gremStmt, + } + log.Debug("cloning relationships from the generic hierarchy", logData) + + if _, err = n.getEdges(gremStmt); err != nil { + log.ErrorC("getEdges", err, logData) + return + } + + return n.RemoveCloneEdges(ctx, attempt, instanceID, dimensionName) +} + +func (n *NeptuneDB) RemoveCloneEdges(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { + gremStmt := fmt.Sprintf( + query.RemoveCloneMarkers, + instanceID, + dimensionName, + ) + logData := log.Data{ + "fn": "RemoveCloneEdges", + "instance_id": instanceID, + "dimension_name": dimensionName, + "gremlin": gremStmt, + } + log.Debug("removing edges to generic hierarchy", logData) + + if _, err = n.exec(gremStmt); err != nil { + log.ErrorC("exec", err, logData) + return + } + return +} + +func (n *NeptuneDB) SetNumberOfChildren(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { + gremStmt := fmt.Sprintf( + query.SetNumberOfChildren, + instanceID, + dimensionName, + ) + + logData := log.Data{ + "fn": "SetNumberOfChildren", + "instance_id": instanceID, + 
"dimension_name": dimensionName, + "gremlin": gremStmt, + } + + log.Debug("setting number-of-children property value on the instance hierarchy nodes", logData) + + if _, err = n.getVertices(gremStmt); err != nil { + log.ErrorC("getV", err, logData) + return + } + + return +} + +func (n *NeptuneDB) SetHasData(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { + gremStmt := fmt.Sprintf( + query.SetHasData, + instanceID, + dimensionName, + instanceID, + dimensionName, + ) + + logData := log.Data{ + "instance_id": instanceID, + "dimension_name": dimensionName, + "gremlin": gremStmt, + } + + log.Debug("setting has-data property on the instance hierarchy", logData) + + if _, err = n.getVertices(gremStmt); err != nil { + log.ErrorC("getV", err, logData) + return + } + + return +} + +func (n *NeptuneDB) MarkNodesToRemain(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { + gremStmt := fmt.Sprintf(query.MarkNodesToRemain, + instanceID, + dimensionName, + // instanceID, + // dimensionName, + ) + + logData := log.Data{ + "instance_id": instanceID, + "dimension_name": dimensionName, + "gremlin": gremStmt, + } + + log.Debug("marking nodes to remain after trimming sparse branches", logData) + + if _, err = n.getVertices(gremStmt); err != nil { + log.ErrorC("getV", err, logData) + return + } + + return +} + +func (n *NeptuneDB) RemoveNodesNotMarkedToRemain(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { + gremStmt := fmt.Sprintf(query.RemoveNodesNotMarkedToRemain, instanceID, dimensionName) + logData := log.Data{ + "instance_id": instanceID, + "dimension_name": dimensionName, + "gremlin": gremStmt, + } + + log.Debug("removing nodes not marked to remain after trimming sparse branches", logData) + + if _, err = n.exec(gremStmt); err != nil { + log.ErrorC("exec", err, logData) + return + } + return +} + +func (n *NeptuneDB) RemoveRemainMarker(ctx context.Context, attempt int, instanceID, 
dimensionName string) (err error) { + gremStmt := fmt.Sprintf(query.RemoveRemainMarker, instanceID, dimensionName) + logData := log.Data{ + "fn": "RemoveRemainMarker", + "gremlin": gremStmt, + "instance_id": instanceID, + "dimension_name": dimensionName, + } + log.Debug("removing the remain property from the nodes that remain", logData) + + if _, err = n.exec(gremStmt); err != nil { + log.ErrorC("exec", err, logData) + return + } + return +} + +func (n *NeptuneDB) GetHierarchyCodelist(ctx context.Context, instanceID, dimension string) (codelistID string, err error) { + gremStmt := fmt.Sprintf(query.HierarchyExists, instanceID, dimension) + logData := log.Data{ + "fn": "GetHierarchyCodelist", + "gremlin": gremStmt, + "instance_id": instanceID, + "dimension_name": dimension, + } + + var vertex graphson.Vertex + if vertex, err = n.getVertex(gremStmt); err != nil { + log.ErrorC("get", err, logData) + return + } + if codelistID, err = vertex.GetProperty("code_list"); err != nil { + log.ErrorC("bad prop", err, logData) + return + } + return +} + +func (n *NeptuneDB) GetHierarchyRoot(ctx context.Context, instanceID, dimension string) (node *models.Response, err error) { + gremStmt := fmt.Sprintf(query.GetHierarchyRoot, instanceID, dimension) + logData := log.Data{ + "fn": "GetHierarchyRoot", + "gremlin": gremStmt, + "instance_id": instanceID, + "dimension_name": dimension, + } + + var vertex graphson.Vertex + if vertex, err = n.getVertex(gremStmt); err != nil { + log.ErrorC("get", err, logData) + return + } + if node, err = n.convertVertexToResponse(vertex, instanceID, dimension); err != nil { + log.ErrorC("conv", err, logData) + return + } + return +} + +func (n *NeptuneDB) GetHierarchyElement(ctx context.Context, instanceID, dimension, code string) (node *models.Response, err error) { + gremStmt := fmt.Sprintf(query.GetHierarchyElement, instanceID, dimension, code) + logData := log.Data{ + "fn": "GetHierarchyElement", + "gremlin": gremStmt, + "instance_id": instanceID, 
+ "code_list_id": code, + "dimension_name": dimension, + } + + var vertex graphson.Vertex + if vertex, err = n.getVertex(gremStmt); err != nil { + log.ErrorC("get", err, logData) + return + } + if node, err = n.convertVertexToResponse(vertex, instanceID, dimension); err != nil { + log.ErrorC("conv", err, logData) + return + } + return +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/instance.go b/vendor/github.com/ONSdigital/dp-graph/neptune/instance.go new file mode 100644 index 00000000..f0da37ad --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/instance.go @@ -0,0 +1,39 @@ +package neptune + +import ( + "context" + + "github.com/ONSdigital/dp-dimension-importer/model" +) + +func (n *NeptuneDB) CountInsertedObservations(ctx context.Context, instanceID string) (count int64, err error) { + return 0, nil +} + +func (n *NeptuneDB) AddVersionDetailsToInstance(ctx context.Context, instanceID string, datasetID string, edition string, version int) error { + return nil +} + +func (n *NeptuneDB) SetInstanceIsPublished(ctx context.Context, instanceID string) error { + return nil +} + +func (n *NeptuneDB) CreateInstanceConstraint(ctx context.Context, i *model.Instance) error { + return nil +} + +func (n *NeptuneDB) CreateInstance(ctx context.Context, i *model.Instance) error { + return nil +} + +func (n *NeptuneDB) AddDimensions(ctx context.Context, i *model.Instance) error { + return nil +} + +func (n *NeptuneDB) CreateCodeRelationship(ctx context.Context, i *model.Instance, codeListID, code string) error { + return nil +} + +func (n *NeptuneDB) InstanceExists(ctx context.Context, i *model.Instance) (bool, error) { + return true, nil +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go new file mode 100644 index 00000000..c63afc1f --- /dev/null +++ 
b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go @@ -0,0 +1,184 @@ +package internal + +import ( + "fmt" + + "github.com/gedge/graphson" +) + +/* +This module provides a handful of mock convenience functions that can be +used to inject behaviour into NeptunePoolMock. +*/ + +import ( + "errors" +) + +// ReturnOne is a mock implementation for NeptunePool.GetCount() +// that always returns a count of 1. +var ReturnOne = func(q string, bindings, rebindings map[string]string) (i int64, err error) { + return 1, nil +} + +// ReturnTwo is a mock implementation for NeptunePool.GetCount() +// that always returns a count of 2. +var ReturnTwo = func(q string, bindings, rebindings map[string]string) (i int64, err error) { + return 2, nil +} + +// ReturnZero is a mock implementation for NeptunePool.GetCount() +// that always returns a count of 0. +var ReturnZero = func(q string, bindings, rebindings map[string]string) (i int64, err error) { + return 0, nil +} + +// ReturnMalformedIntRequestErr is a mock implementation for NeptunePool.GetCount() +// that always returns an error that is judged to be not transient by +// neptune.isTransientError +var ReturnMalformedIntRequestErr = func(q string, bindings, rebindings map[string]string) (i int64, err error) { + return -1, errors.New(" MALFORMED REQUEST ") +} + +// ReturnMalformedNilInterfaceRequestErr is a mock implementation for +// NeptunePool functions that return (Interface{}, error) which always returns an +// error that is judged to be not transient by neptune.isTransientError +var ReturnMalformedNilInterfaceRequestErr = func(q string, bindings, rebindings map[string]string) (interface{}, error) { + return nil, errors.New(" MALFORMED REQUEST ") +} + +// ReturnMalformedStringListRequestErr is a mock implementation for +// NeptunePool functions that return ([]string, error) which always returns an +// error that is judged to be not transient by neptune.isTransientError +var 
ReturnMalformedStringListRequestErr = func(q string, bindings, rebindings map[string]string) ([]string, error) { + return nil, errors.New(" MALFORMED REQUEST ") +} + +// ReturnThreeCodeLists is mock implementation for NeptunePool.Get() that always +// returns a slice of three graphson.Vertex(s): +// - of type "_code_list" +// - with a "listID" property set to "listID_0", "listID_1", and "ListID_2" respectively. +// - with an "edition" property set to "my-test-edition" +var ReturnThreeCodeLists = func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { + codeLists := []graphson.Vertex{} + for i := 0; i < 3; i++ { + vertex := makeCodeListVertex(i, "my-test-edition") + codeLists = append(codeLists, vertex) + } + return codeLists, nil +} + +// ReturnThreeEditionVertices is mock implementation for NeptunePool.Get() that always +// returns a slice of three graphson.Vertex(s): +// - of type "unused-vertex-type" +// - with a an "edition" property set to "edition_0", "edition_1", and "edition_2" respectively. +var ReturnThreeEditionVertices = func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { + editions := []graphson.Vertex{} + for i := 0; i < 3; i++ { + vertex := makeVertex("unused-vertex-type") + setVertexStringProperty(&vertex, "edition", fmt.Sprintf("edition_%d", i)) + editions = append(editions, vertex) + } + return editions, nil +} + +// ReturnThreeCodeVertices is mock implementation for NeptunePool.Get() that always +// returns a slice of three graphson.Vertex(s): +// - of type "unused-vertex-type" +// - with a "value" property set to "code_0", "code_1", and "code_2" respectively. 
+var ReturnThreeCodeVertices = func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { + codes := []graphson.Vertex{} + for i := 0; i < 3; i++ { + vertex := makeVertex("unused-vertex-type") + setVertexStringProperty(&vertex, "value", fmt.Sprintf("code_%d", i)) + codes = append(codes, vertex) + } + return codes, nil +} + +// ReturnThreeUselessVertices is mock implementation for NeptunePool.Get() that always +// returns a slice of three graphson.Vertex(s) of type "_useless_vertex_type", and with +// no properties set. +var ReturnThreeUselessVertices = func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { + codeLists := []graphson.Vertex{} + for i := 0; i < 3; i++ { + vertex := makeVertex("_useless_vertex_type") + codeLists = append(codeLists, vertex) + } + return codeLists, nil +} + +// ReturnZeroVertices provides an empty list of graphson.Vertex(s) +var ReturnZeroVertices = func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { + return []graphson.Vertex{}, nil +} + +/* +makeVertex makes a graphson.Vertex of a given type (e.g. "_code_list"). +*/ +func makeVertex(vertexType string) graphson.Vertex { + vertexValue := graphson.VertexValue{ + ID: "unused_vertex_value_ID", + Label: vertexType, + Properties: map[string][]graphson.VertexProperty{}, + } + vertex := graphson.Vertex{Type: vertexType, Value: vertexValue} + return vertex +} + +/* +setVertexTypedProperty sets the given key/polymorphic-value to a vertex. +The "theType" parameter must be "string" or "int". 
+*/ +func setVertexTypedProperty(theType string, vertex *graphson.Vertex, key string, value interface{}) { + gv := graphson.GenericValue{Type: "string", Value: key} + pv := graphson.VertexPropertyValue{ + ID: gv, + Label: key, + Value: value, + } + vertexProperty := graphson.VertexProperty{Type: theType, Value: pv} + vertexProperties := []graphson.VertexProperty{vertexProperty} + vertex.Value.Properties[key] = vertexProperties +} + +// setVertexStringProperty sets the given key/value in a vertex. +func setVertexStringProperty(vertex *graphson.Vertex, key string, value interface{}) { + setVertexTypedProperty("string", vertex, key, value) +} + +// setVertexIntProperty sets the given key/value in a vertex. +func setVertexIntProperty(vertex *graphson.Vertex, key string, value int) { + setVertexTypedProperty("int", vertex, key, value) +} + +// makeCodeListVertex provides a graphson.Vertex with a vertex type of the +// form "_code_list", and a "listID" property of the form "listID_3". +// It is also given an "edition" property with the supplied value. +func makeCodeListVertex(listIDSuffix int, edition string) graphson.Vertex { + v := makeVertex("_code_list") + setVertexStringProperty(&v, "listID", fmt.Sprintf("listID_%d", listIDSuffix)) + setVertexStringProperty(&v, "edition", edition) + return v +} + +// ReturnFiveStrings is a mock implementation for +// NeptunePool functions that return ([]string, error) which always returns +// five strings. +var ReturnFiveStrings = func(q string, bindings, rebindings map[string]string) ([]string, error) { + return []string{"a", "b", "c", "d", "e"}, nil +} + +// ReturnStringRecordWithNonIntegerFourthElement is a mock implementation for +// NeptunePool functions that return ([]string, error) which always returns +// 4 strings - in which the third one cannot be cast to an integer. 
+var ReturnStringRecordWithNonIntegerFourthElement = func(q string, bindings, rebindings map[string]string) ([]string, error) { + return []string{"1", "2", "fibble", "3"}, nil +} + +// ReturnProperlyFormedDatasetRecord is a mock implementation for +// NeptunePool functions that return ([]string, error) which always returns +// A single quartet of strings that should satisfy the GetCodeDatasets method. +var ReturnProperlyFormedDatasetRecord = func(q string, bindings, rebindings map[string]string) ([]string, error) { + return []string{"exampleDimName", "exampleDatasetEdition", "3", "exampleDatasetID"}, nil +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go new file mode 100644 index 00000000..61dbf0ff --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go @@ -0,0 +1,408 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package internal + +import ( + "context" + "github.com/ONSdigital/dp-graph/neptune/driver" + "github.com/ONSdigital/gremgo-neptune" + "sync" +) + +var ( + lockNeptunePoolMockClose sync.RWMutex + lockNeptunePoolMockExecute sync.RWMutex + lockNeptunePoolMockGet sync.RWMutex + lockNeptunePoolMockGetCount sync.RWMutex + lockNeptunePoolMockGetE sync.RWMutex + lockNeptunePoolMockGetStringList sync.RWMutex + lockNeptunePoolMockOpenCursorCtx sync.RWMutex +) + +// Ensure, that NeptunePoolMock does implement NeptunePool. +// If this is not the case, regenerate this file with moq. +var _ driver.NeptunePool = &NeptunePoolMock{} + +// NeptunePoolMock is a mock implementation of NeptunePool. 
+// +// func TestSomethingThatUsesNeptunePool(t *testing.T) { +// +// // make and configure a mocked NeptunePool +// mockedNeptunePool := &NeptunePoolMock{ +// CloseFunc: func() { +// panic("mock out the Close method") +// }, +// ExecuteFunc: func(query string, bindings map[string]string, rebindings map[string]string) ([]gremgo.Response, error) { +// panic("mock out the Execute method") +// }, +// GetFunc: func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { +// panic("mock out the Get method") +// }, +// GetCountFunc: func(q string, bindings map[string]string, rebindings map[string]string) (int64, error) { +// panic("mock out the GetCount method") +// }, +// GetEFunc: func(q string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { +// panic("mock out the GetE method") +// }, +// GetStringListFunc: func(query string, bindings map[string]string, rebindings map[string]string) ([]string, error) { +// panic("mock out the GetStringList method") +// }, +// OpenCursorCtxFunc: func(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Cursor, error) { +// panic("mock out the OpenCursorCtx method") +// }, +// } +// +// // use mockedNeptunePool in code that requires NeptunePool +// // and then make assertions. +// +// } +type NeptunePoolMock struct { + // CloseFunc mocks the Close method. + CloseFunc func() + + // ExecuteFunc mocks the Execute method. + ExecuteFunc func(query string, bindings map[string]string, rebindings map[string]string) ([]gremgo.Response, error) + + // GetFunc mocks the Get method. + GetFunc func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) + + // GetCountFunc mocks the GetCount method. + GetCountFunc func(q string, bindings map[string]string, rebindings map[string]string) (int64, error) + + // GetEFunc mocks the GetE method. 
+ GetEFunc func(q string, bindings map[string]string, rebindings map[string]string) (interface{}, error) + + // GetStringListFunc mocks the GetStringList method. + GetStringListFunc func(query string, bindings map[string]string, rebindings map[string]string) ([]string, error) + + // OpenCursorCtxFunc mocks the OpenCursorCtx method. + OpenCursorCtxFunc func(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Cursor, error) + + // calls tracks calls to the methods. + calls struct { + // Close holds details about calls to the Close method. + Close []struct { + } + // Execute holds details about calls to the Execute method. + Execute []struct { + // Query is the query argument value. + Query string + // Bindings is the bindings argument value. + Bindings map[string]string + // Rebindings is the rebindings argument value. + Rebindings map[string]string + } + // Get holds details about calls to the Get method. + Get []struct { + // Query is the query argument value. + Query string + // Bindings is the bindings argument value. + Bindings map[string]string + // Rebindings is the rebindings argument value. + Rebindings map[string]string + } + // GetCount holds details about calls to the GetCount method. + GetCount []struct { + // Q is the q argument value. + Q string + // Bindings is the bindings argument value. + Bindings map[string]string + // Rebindings is the rebindings argument value. + Rebindings map[string]string + } + // GetE holds details about calls to the GetE method. + GetE []struct { + // Q is the q argument value. + Q string + // Bindings is the bindings argument value. + Bindings map[string]string + // Rebindings is the rebindings argument value. + Rebindings map[string]string + } + // GetStringList holds details about calls to the GetStringList method. + GetStringList []struct { + // Query is the query argument value. + Query string + // Bindings is the bindings argument value. 
+ Bindings map[string]string + // Rebindings is the rebindings argument value. + Rebindings map[string]string + } + // OpenCursorCtx holds details about calls to the OpenCursorCtx method. + OpenCursorCtx []struct { + // Ctx is the ctx argument value. + Ctx context.Context + // Query is the query argument value. + Query string + // Bindings is the bindings argument value. + Bindings map[string]string + // Rebindings is the rebindings argument value. + Rebindings map[string]string + } + } +} + +// Close calls CloseFunc. +func (mock *NeptunePoolMock) Close() { + if mock.CloseFunc == nil { + panic("NeptunePoolMock.CloseFunc: method is nil but NeptunePool.Close was just called") + } + callInfo := struct { + }{} + lockNeptunePoolMockClose.Lock() + mock.calls.Close = append(mock.calls.Close, callInfo) + lockNeptunePoolMockClose.Unlock() + mock.CloseFunc() +} + +// CloseCalls gets all the calls that were made to Close. +// Check the length with: +// len(mockedNeptunePool.CloseCalls()) +func (mock *NeptunePoolMock) CloseCalls() []struct { +} { + var calls []struct { + } + lockNeptunePoolMockClose.RLock() + calls = mock.calls.Close + lockNeptunePoolMockClose.RUnlock() + return calls +} + +// Execute calls ExecuteFunc. +func (mock *NeptunePoolMock) Execute(query string, bindings map[string]string, rebindings map[string]string) ([]gremgo.Response, error) { + if mock.ExecuteFunc == nil { + panic("NeptunePoolMock.ExecuteFunc: method is nil but NeptunePool.Execute was just called") + } + callInfo := struct { + Query string + Bindings map[string]string + Rebindings map[string]string + }{ + Query: query, + Bindings: bindings, + Rebindings: rebindings, + } + lockNeptunePoolMockExecute.Lock() + mock.calls.Execute = append(mock.calls.Execute, callInfo) + lockNeptunePoolMockExecute.Unlock() + return mock.ExecuteFunc(query, bindings, rebindings) +} + +// ExecuteCalls gets all the calls that were made to Execute. 
+// Check the length with: +// len(mockedNeptunePool.ExecuteCalls()) +func (mock *NeptunePoolMock) ExecuteCalls() []struct { + Query string + Bindings map[string]string + Rebindings map[string]string +} { + var calls []struct { + Query string + Bindings map[string]string + Rebindings map[string]string + } + lockNeptunePoolMockExecute.RLock() + calls = mock.calls.Execute + lockNeptunePoolMockExecute.RUnlock() + return calls +} + +// Get calls GetFunc. +func (mock *NeptunePoolMock) Get(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { + if mock.GetFunc == nil { + panic("NeptunePoolMock.GetFunc: method is nil but NeptunePool.Get was just called") + } + callInfo := struct { + Query string + Bindings map[string]string + Rebindings map[string]string + }{ + Query: query, + Bindings: bindings, + Rebindings: rebindings, + } + lockNeptunePoolMockGet.Lock() + mock.calls.Get = append(mock.calls.Get, callInfo) + lockNeptunePoolMockGet.Unlock() + return mock.GetFunc(query, bindings, rebindings) +} + +// GetCalls gets all the calls that were made to Get. +// Check the length with: +// len(mockedNeptunePool.GetCalls()) +func (mock *NeptunePoolMock) GetCalls() []struct { + Query string + Bindings map[string]string + Rebindings map[string]string +} { + var calls []struct { + Query string + Bindings map[string]string + Rebindings map[string]string + } + lockNeptunePoolMockGet.RLock() + calls = mock.calls.Get + lockNeptunePoolMockGet.RUnlock() + return calls +} + +// GetCount calls GetCountFunc. 
+func (mock *NeptunePoolMock) GetCount(q string, bindings map[string]string, rebindings map[string]string) (int64, error) { + if mock.GetCountFunc == nil { + panic("NeptunePoolMock.GetCountFunc: method is nil but NeptunePool.GetCount was just called") + } + callInfo := struct { + Q string + Bindings map[string]string + Rebindings map[string]string + }{ + Q: q, + Bindings: bindings, + Rebindings: rebindings, + } + lockNeptunePoolMockGetCount.Lock() + mock.calls.GetCount = append(mock.calls.GetCount, callInfo) + lockNeptunePoolMockGetCount.Unlock() + return mock.GetCountFunc(q, bindings, rebindings) +} + +// GetCountCalls gets all the calls that were made to GetCount. +// Check the length with: +// len(mockedNeptunePool.GetCountCalls()) +func (mock *NeptunePoolMock) GetCountCalls() []struct { + Q string + Bindings map[string]string + Rebindings map[string]string +} { + var calls []struct { + Q string + Bindings map[string]string + Rebindings map[string]string + } + lockNeptunePoolMockGetCount.RLock() + calls = mock.calls.GetCount + lockNeptunePoolMockGetCount.RUnlock() + return calls +} + +// GetE calls GetEFunc. +func (mock *NeptunePoolMock) GetE(q string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { + if mock.GetEFunc == nil { + panic("NeptunePoolMock.GetEFunc: method is nil but NeptunePool.GetE was just called") + } + callInfo := struct { + Q string + Bindings map[string]string + Rebindings map[string]string + }{ + Q: q, + Bindings: bindings, + Rebindings: rebindings, + } + lockNeptunePoolMockGetE.Lock() + mock.calls.GetE = append(mock.calls.GetE, callInfo) + lockNeptunePoolMockGetE.Unlock() + return mock.GetEFunc(q, bindings, rebindings) +} + +// GetECalls gets all the calls that were made to GetE. 
+// Check the length with: +// len(mockedNeptunePool.GetECalls()) +func (mock *NeptunePoolMock) GetECalls() []struct { + Q string + Bindings map[string]string + Rebindings map[string]string +} { + var calls []struct { + Q string + Bindings map[string]string + Rebindings map[string]string + } + lockNeptunePoolMockGetE.RLock() + calls = mock.calls.GetE + lockNeptunePoolMockGetE.RUnlock() + return calls +} + +// GetStringList calls GetStringListFunc. +func (mock *NeptunePoolMock) GetStringList(query string, bindings map[string]string, rebindings map[string]string) ([]string, error) { + if mock.GetStringListFunc == nil { + panic("NeptunePoolMock.GetStringListFunc: method is nil but NeptunePool.GetStringList was just called") + } + callInfo := struct { + Query string + Bindings map[string]string + Rebindings map[string]string + }{ + Query: query, + Bindings: bindings, + Rebindings: rebindings, + } + lockNeptunePoolMockGetStringList.Lock() + mock.calls.GetStringList = append(mock.calls.GetStringList, callInfo) + lockNeptunePoolMockGetStringList.Unlock() + return mock.GetStringListFunc(query, bindings, rebindings) +} + +// GetStringListCalls gets all the calls that were made to GetStringList. +// Check the length with: +// len(mockedNeptunePool.GetStringListCalls()) +func (mock *NeptunePoolMock) GetStringListCalls() []struct { + Query string + Bindings map[string]string + Rebindings map[string]string +} { + var calls []struct { + Query string + Bindings map[string]string + Rebindings map[string]string + } + lockNeptunePoolMockGetStringList.RLock() + calls = mock.calls.GetStringList + lockNeptunePoolMockGetStringList.RUnlock() + return calls +} + +// OpenCursorCtx calls OpenCursorCtxFunc. 
+func (mock *NeptunePoolMock) OpenCursorCtx(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Cursor, error) { + if mock.OpenCursorCtxFunc == nil { + panic("NeptunePoolMock.OpenCursorCtxFunc: method is nil but NeptunePool.OpenCursorCtx was just called") + } + callInfo := struct { + Ctx context.Context + Query string + Bindings map[string]string + Rebindings map[string]string + }{ + Ctx: ctx, + Query: query, + Bindings: bindings, + Rebindings: rebindings, + } + lockNeptunePoolMockOpenCursorCtx.Lock() + mock.calls.OpenCursorCtx = append(mock.calls.OpenCursorCtx, callInfo) + lockNeptunePoolMockOpenCursorCtx.Unlock() + return mock.OpenCursorCtxFunc(ctx, query, bindings, rebindings) +} + +// OpenCursorCtxCalls gets all the calls that were made to OpenCursorCtx. +// Check the length with: +// len(mockedNeptunePool.OpenCursorCtxCalls()) +func (mock *NeptunePoolMock) OpenCursorCtxCalls() []struct { + Ctx context.Context + Query string + Bindings map[string]string + Rebindings map[string]string +} { + var calls []struct { + Ctx context.Context + Query string + Bindings map[string]string + Rebindings map[string]string + } + lockNeptunePoolMockOpenCursorCtx.RLock() + calls = mock.calls.OpenCursorCtx + lockNeptunePoolMockOpenCursorCtx.RUnlock() + return calls +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go b/vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go new file mode 100644 index 00000000..925171e8 --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go @@ -0,0 +1,83 @@ +package neptune + +import ( + "fmt" + + "github.com/ONSdigital/dp-graph/neptune/query" + "github.com/ONSdigital/dp-hierarchy-api/models" + "github.com/ONSdigital/go-ns/log" + "github.com/gedge/graphson" +) + +func (n *NeptuneDB) convertVertexToResponse(v graphson.Vertex, instanceID, dimension string) (res *models.Response, err error) { + logData := 
log.Data{"fn": "convertVertexToResponse"} + + res = &models.Response{ + ID: v.GetID(), + } + if res.Label, err = v.GetLabel(); err != nil { + log.ErrorC("bad label", err, logData) + return + } + if res.NoOfChildren, err = v.GetPropertyInt64("numberOfChildren"); err != nil { + log.ErrorC("bad numberOfChildren", err, logData) + return + } + if res.HasData, err = v.GetPropertyBool("hasData"); err != nil { + log.ErrorC("bad hasData", err, logData) + return + } + if res.NoOfChildren > 0 && instanceID != "" { + var code string + if code, err = v.GetProperty("code"); err != nil { + log.ErrorC("bad GetProp code", err, logData) + return + } + + gremStmt := fmt.Sprintf(query.GetChildren, instanceID, dimension, code) + logData["statement"] = gremStmt + + var childVertices []graphson.Vertex + if childVertices, err = n.getVertices(gremStmt); err != nil { + log.ErrorC("get", err, logData) + return + } + if int64(len(childVertices)) != res.NoOfChildren { + logData["num_children_prop"] = res.NoOfChildren + logData["num_children_get"] = len(childVertices) + logData["node_id"] = res.ID + log.Info("child count mismatch", logData) + } + var childElement *models.Element + for _, child := range childVertices { + if childElement, err = convertVertexToElement(child); err != nil { + log.ErrorC("converting child", err, logData) + return + } + res.Children = append(res.Children, childElement) + } + } + return +} + +func convertVertexToElement(v graphson.Vertex) (res *models.Element, err error) { + logData := log.Data{"fn": "convertVertexToElement"} + + res = &models.Element{ + ID: v.GetID(), + } + + if res.Label, err = v.GetLabel(); err != nil { + log.ErrorC("bad label", err, logData) + return + } + if res.NoOfChildren, err = v.GetPropertyInt64("numberOfChildren"); err != nil { + log.ErrorC("bad numberOfChildren", err, logData) + return + } + if res.HasData, err = v.GetPropertyBool("hasData"); err != nil { + log.ErrorC("bad hasData", err, logData) + return + } + return +} diff --git 
a/vendor/github.com/ONSdigital/dp-graph/neptune/mockedneptune.go b/vendor/github.com/ONSdigital/dp-graph/neptune/mockedneptune.go new file mode 100644 index 00000000..e48df189 --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/mockedneptune.go @@ -0,0 +1,24 @@ +package neptune + +import ( + "github.com/ONSdigital/dp-graph/neptune/internal" +) + +/* +This module provides the MockDB factory function to make a NeptuneDB into +which a mocked implementation of the gremgo driver's Pool may be injected +to avoid real database access. +*/ + +import ( + "github.com/ONSdigital/dp-graph/neptune/driver" +) + +// mockDB provides a NeptuneDB, into which you can pass a mocked +// NeptunePoolMock implementation, and thus write tests that bypass real +// database communication. +func mockDB(poolMock *internal.NeptunePoolMock) *NeptuneDB { + driver := driver.NeptuneDriver{Pool: poolMock} + db := &NeptuneDB{driver, 5, 30} + return db +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go b/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go new file mode 100644 index 00000000..fdc89651 --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go @@ -0,0 +1,258 @@ +package neptune + +import ( + "context" + "errors" + "fmt" + "math" + "math/rand" + "strings" + "time" + + "github.com/ONSdigital/dp-graph/neptune/driver" + "github.com/ONSdigital/go-ns/log" + "github.com/gedge/graphson" +) + +type NeptuneDB struct { + driver.NeptuneDriver + + maxAttempts int + timeout int +} + +func New(dbAddr string, size, timeout, retries int, errs chan error) (n *NeptuneDB, err error) { + // set defaults if not provided + if size == 0 { + size = 30 + } + if timeout == 0 { + timeout = 30 + } + if retries == 0 { + retries = 5 + } + + var d *driver.NeptuneDriver + if d, err = driver.New(context.Background(), dbAddr, errs); err != nil { + return + } + + // seed for 
sleepy() below
+	rand.Seed(time.Now().Unix())
+
+	n = &NeptuneDB{
+		*d,
+		1 + retries,
+		timeout,
+	}
+	return
+}
+
+func (n *NeptuneDB) getVertices(gremStmt string) (vertices []graphson.Vertex, err error) {
+	logData := log.Data{"fn": "getVertices", "statement": gremStmt, "attempt": 1}
+
+	var res interface{}
+	for attempt := 1; attempt < n.maxAttempts; attempt++ {
+		if attempt > 1 {
+			log.ErrorC("will retry", err, logData)
+			sleepy(attempt, 20*time.Millisecond)
+			logData["attempt"] = attempt
+		}
+		res, err = n.Pool.Get(gremStmt, nil, nil)
+		if err == nil {
+			var ok bool
+			if vertices, ok = res.([]graphson.Vertex); !ok {
+				err = errors.New("cannot cast Get results to []Vertex")
+				log.ErrorC("cast", err, logData)
+				return
+			}
+			// success
+			return
+		}
+		// XXX check err for non-retriable errors
+		if !isTransientError(err) {
+			return
+		}
+	}
+	// ASSERT: failed all attempts
+	log.ErrorC("maxAttempts reached", err, logData)
+	// wrap the last error so callers can distinguish retry exhaustion
+	err = ErrAttemptsExceededLimit{err}
+	return
+}
+
+func (n *NeptuneDB) getStringList(gremStmt string) (strs []string, err error) {
+	logData := log.Data{"fn": "getStringList", "statement": gremStmt, "attempt": 1}
+
+	for attempt := 1; attempt < n.maxAttempts; attempt++ {
+		if attempt > 1 {
+			log.ErrorC("will retry", err, logData)
+			sleepy(attempt, 20*time.Millisecond)
+			logData["attempt"] = attempt
+		}
+		strs, err = n.Pool.GetStringList(gremStmt, nil, nil)
+		if err == nil {
+			return
+		}
+		// XXX check err for non-retriable errors
+		if !isTransientError(err) {
+			return
+		}
+	}
+	// ASSERT: failed all attempts
+	log.ErrorC("maxAttempts reached", err, logData)
+	err = ErrAttemptsExceededLimit{err}
+	return
+}
+
+// getVertex expects the statement to yield exactly one vertex.
+func (n *NeptuneDB) getVertex(gremStmt string) (vertex graphson.Vertex, err error) {
+	logData := log.Data{"fn": "getVertex", "statement": gremStmt}
+
+	var vertices []graphson.Vertex
+	if vertices, err = n.getVertices(gremStmt); err != nil {
+		log.ErrorC("get", err, logData)
+		return
+	}
+	if len(vertices) != 1 {
+		err = 
errors.New("expected one vertex") + log.ErrorC("not one", err, logData) + return + } + return vertices[0], nil +} + +func (n *NeptuneDB) getEdges(gremStmt string) (edges []graphson.Edge, err error) { + logData := log.Data{"fn": "getEdges", "statement": gremStmt, "attempt": 1} + + var res interface{} + for attempt := 1; attempt < n.maxAttempts; attempt++ { + if attempt > 1 { + log.ErrorC("will retry", err, logData) + sleepy(attempt, 20*time.Millisecond) + logData["attempt"] = attempt + } + res, err = n.Pool.GetE(gremStmt, nil, nil) + if err == nil { + // success + var ok bool + if edges, ok = res.([]graphson.Edge); !ok { + err = errors.New("cannot cast GetE results to []Edge") + log.ErrorC("cast", err, logData) + return + } + // return re-cast success + return + } + // XXX check err for non-retriable errors + if !isTransientError(err) { + return + } + } + // ASSERT: failed all attempts + log.ErrorC("maxAttempts reached", err, logData) + err = ErrAttemptsExceededLimit{err} + return +} + +func (n *NeptuneDB) exec(gremStmt string) (res interface{}, err error) { + logData := log.Data{"fn": "n.exec", "statement": gremStmt, "attempt": 1} + + for attempt := 1; attempt < n.maxAttempts; attempt++ { + if attempt > 1 { + log.ErrorC("will retry", err, logData) + sleepy(attempt, 20*time.Millisecond) + logData["attempt"] = attempt + } + if res, err = n.Pool.Execute(gremStmt, nil, nil); err == nil { + // success + if res == nil { + err = errors.New("res returned nil") + log.ErrorC("bad res", err, logData) + return + } + logData["exec_res"] = res + log.Info("exec ok", logData) + return + } + // XXX check err more thoroughly (isTransientError?) (non-err failures?) 
+ if !isTransientError(err) { + return + } + } + // ASSERT: failed all attempts + log.ErrorC("maxAttempts reached", err, logData) + err = ErrAttemptsExceededLimit{err} + return +} + +func (n *NeptuneDB) getNumber(gremStmt string) (count int64, err error) { + logData := log.Data{"fn": "n.getNumber", "statement": gremStmt, "attempt": 1} + + for attempt := 1; attempt < n.maxAttempts; attempt++ { + if attempt > 1 { + log.ErrorC("will retry", err, logData) + sleepy(attempt, 20*time.Millisecond) + logData["attempt"] = attempt + } + if count, err = n.Pool.GetCount(gremStmt, nil, nil); err == nil { + // success, so return number + return + } + // XXX check non-nil err more thoroughly (isTransientError?) + if !isTransientError(err) { + return + } + } + // ASSERT: failed all attempts + log.ErrorC("maxAttempts reached", err, logData) + err = ErrAttemptsExceededLimit{err} + return +} + +// ErrAttemptsExceededLimit is returned when the number of attempts has reached +// the maximum permitted +type ErrAttemptsExceededLimit struct { + WrappedErr error +} + +func (e ErrAttemptsExceededLimit) Error() string { + return fmt.Sprintf("number of attempts to execute statement exceeded: %s", e.WrappedErr.Error()) +} + +/* +func (n *Neptune) checkAttempts(err error, instanceID string, attempt int) error { + if !isTransientError(err) { + log.Info("received an error from neptune that cannot be retried", + log.Data{"instance_id": instanceID, "error": err}) + + return err + } + + time.Sleep(getSleepTime(attempt, 20*time.Millisecond)) + + if attempt >= n.maxRetries { + return ErrAttemptsExceededLimit{err} + } + + return nil +} +*/ +func isTransientError(err error) bool { + if strings.Contains(err.Error(), " MALFORMED REQUEST ") || + strings.Contains(err.Error(), " INVALID REQUEST ARGUMENTS ") { + return false + } + return true +} + +// sleepy sleeps for a time which increases, based on the attempt and initial retry time. 
+// It uses the algorithm 2^n where n is the attempt number (double the previous) and
+// a random jitter of 1-4ms (subtracted from the backoff) so that the server isn't hit
+// at the same time by many clients
+func sleepy(attempt int, retryTime time.Duration) {
+	n := (math.Pow(2, float64(attempt)))
+	rnd := time.Duration(rand.Intn(4)+1) * time.Millisecond
+	time.Sleep((time.Duration(n) * retryTime) - rnd)
+}
diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go b/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go
new file mode 100644
index 00000000..9aa1c045
--- /dev/null
+++ b/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go
@@ -0,0 +1,62 @@
+package neptune
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/ONSdigital/dp-graph/neptune/query"
+	"github.com/ONSdigital/dp-graph/observation"
+	"github.com/ONSdigital/dp-observation-importer/models"
+)
+
+// ErrEmptyFilter is returned if the provided filter is empty.
+var ErrEmptyFilter = errors.New("filter is empty") + +func (n *NeptuneDB) StreamCSVRows(ctx context.Context, filter *observation.Filter, limit *int) (observation.StreamRowReader, error) { + if filter == nil { + return nil, ErrEmptyFilter + } + + q := fmt.Sprintf(query.GetInstanceHeader, filter.InstanceID) + + q += buildObservationsQuery(filter) + q += query.GetObservationSelectRowPart + + if limit != nil { + q += fmt.Sprintf(query.LimitPart, *limit) + } + + return n.Pool.OpenCursorCtx(ctx, q, nil, nil) +} + +func buildObservationsQuery(f *observation.Filter) string { + if f.IsEmpty() { + return fmt.Sprintf(query.GetAllObservationsPart, f.InstanceID) + } + + q := fmt.Sprintf(query.GetObservationsPart, f.InstanceID) + + for _, dim := range f.DimensionFilters { + if len(dim.Options) == 0 { + continue + } + + for i, opt := range dim.Options { + dim.Options[i] = fmt.Sprintf("'%s'", opt) + } + + q += fmt.Sprintf(query.GetObservationDimensionPart, f.InstanceID, dim.Name, strings.Join(dim.Options, ",")) + "," + } + + //remove trailing comma and close match statement + q = strings.Trim(q, ",") + q += ")" + + return q +} + +func (n *NeptuneDB) InsertObservationBatch(ctx context.Context, attempt int, instanceID string, observations []*models.Observation, dimensionIDs map[string]string) error { + return nil +} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go b/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go new file mode 100644 index 00000000..76c95f8f --- /dev/null +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go @@ -0,0 +1,96 @@ +package query + +const ( + // codelists + GetCodeLists = "g.V().hasLabel('_code_list')" + GetCodeListsFiltered = "g.V().hasLabel('_code_list').has('%s', true)" + GetCodeList = "g.V().hasLabel('_code_list').has('listID', '%s')" + CodeListExists = "g.V().hasLabel('_code_list').has('listID', '%s').count()" + CodeListEditionExists = 
"g.V().hasLabel('_code_list').has('listID', '%s').has('edition', '%s').count()" + GetCodes = "g.V().hasLabel('_code_list')" + + ".has('listID', '%s').has('edition', '%s')" + + ".in('usedBy').hasLabel('_code')" + CodeExists = "g.V().hasLabel('_code_list')" + + ".has('listID', '%s').has('edition', '%s')" + + ".in('usedBy').has('value', '%s').count()" + + /* + This query harvests data from both edges and nodes, so we collapse + the response to contain only strings - to make it parse-able with + the graphson string-list method. + + %s Parameters: codeListID, codeListEdition, codeValue + + Naming: + + r: usedBy relation + rl: usedBy.label + c: code node + d: dataset + de: dataset.edition + dv: dataset.version + */ + GetCodeDatasets = `g.V().hasLabel('_code_list').has('listID', '%s'). + has('edition','%s'). + inE('usedBy').as('r').values('label').as('rl').select('r'). + match( + __.as('r').outV().has('value','%s').as('c'), + __.as('c').out('inDataset').as('d'). + select('d').values('edition').as('de'). + select('d').values('version').as('dv'), + select('d').values('dataset_id').as('did'). + __.as('d').has('is_published',true)). 
+ union(select('rl', 'de', 'dv', 'did')).unfold().select(values) + ` + + // hierarchy write + CloneHierarchyNodes = "g.V().hasLabel('_generic_hierarchy_node_%s').as('old')" + + ".addV('_hierarchy_node_%s_%s')" + + ".property('code',select('old').values('code'))" + + ".property('code_list','%s').as('new')" + + ".addE('clone_of').to('old').select('new')" + CountHierarchyNodes = "g.V().hasLabel('_hierarchy_node_%s_%s').count()" + CloneHierarchyRelationships = "g.V().hasLabel('_generic_hierarchy_node_%s').as('oc')" + + ".out('hasParent')" + + ".in('clone_of').hasLabel('_hierarchy_node_%s_%s')" + + ".addE('hasParent').from(select('oc').in('clone_of').hasLabel('_hierarchy_node_%s_%s'))" + RemoveCloneMarkers = "g.V().hasLabel('_hierarchy_node_%s_%s').outE('clone_of').drop()" + SetNumberOfChildren = "g.V().hasLabel('_hierarchy_node_%s_%s').property(single,'numberOfChildren',__.in('hasParent').count())" + SetHasData = "g.V().hasLabel('_hierarchy_node_%s_%s').as('v')" + + `.V().hasLabel('_%s_%s').as('c').where('v',eq('c')).by('code').by('value').` + + `select('v').property('hasData',true)` + MarkNodesToRemain = "g.V().hasLabel('_hierarchy_node_%s_%s').has('hasData').property('remain',true)" + + ".repeat(out('hasParent')).emit().property('remain',true)" + RemoveNodesNotMarkedToRemain = "g.V().hasLabel('_hierarchy_node_%s_%s').not(has('remain',true)).drop()" + RemoveRemainMarker = "g.V().hasLabel('_hierarchy_node_%s_%s').has('remain').properties('remain').drop()" + + // hierarchy read + HierarchyExists = "g.V().hasLabel('_hierarchy_node_%s_%s').limit(1)" + GetHierarchyRoot = "g.V().hasLabel('_hierarchy_node_%s_%s').not(outE('hasParent')).limit(1)" + GetHierarchyElement = "g.V().hasLabel('_hierarchy_node_%s_%s').has('code','%s')" + GetChildren = "g.V().hasLabel('_hierarchy_node_%s_%s').has('code','%s').in('hasParent').order().by('label')" + GetAncestry = "g.V().hasLabel('_hierarchy_node_%s_%s').has('code','%s').out('hasParent')" + + // instance - import process + CreateInstance 
= "g.addV('_%s_Instance').property(single,'header','%s')" + CountInstance = "g.V().hasLabel('_%s_Instance').count()" + AddInstanceDimensions = "g.V().hasLabel('_%s_Instance').property('dimensions',%s)" + CreateInstanceToCodeRelationship = "g.V().hasLabel('_%s_Instance').as('i').addE('inDataset').from(" + + "V().hasLabel('_code').has('value','%s').where(out('usedBy').has(label,'_code_list_%s'))" + + ")" + AddVersionDetailsToInstance = "g.V().hasLabel('_%s_Instance').property(single,'dataset_id','%s').property(single,'edition','%s').property(single,'version','%s')" + SetInstanceIsPublished = "g.V().hasLabel('_%s_Instance').property(single,'is_published',true)" + CountObservations = "g.V().hasLabel('_%s_observation').count()" + + // dimension + CreateDimensionToInstanceRelationship = "g.addV('_%s_%s').property('value','%s').as('d').addE('HAS_DIMENSION').from(V().hasLabel('_%s_Instance')).select('d').by(id)" + + // observation + GetInstanceHeader = "g.V().hasLabel('_%s_Instance').as('instance')" + GetAllObservationsPart = ".V().hasLabel('_%s_observation').values('row')" + + GetObservationsPart = ".V().hasLabel('_%s_observation').match(" + GetObservationDimensionPart = "__.as('row').out('isValueOf').hasLabel('_%s_%s').where(values('value').is(within(%s)))" + GetObservationSelectRowPart = ".select('instance', 'row').by('header').by('row').unfold().dedup().select(values)" + LimitPart = ".limit(%v)" +) diff --git a/vendor/github.com/ONSdigital/dp-graph/observation/observationtest/row_reader.go b/vendor/github.com/ONSdigital/dp-graph/observation/observationtest/row_reader.go index ed79c8b3..3a999a18 100755 --- a/vendor/github.com/ONSdigital/dp-graph/observation/observationtest/row_reader.go +++ b/vendor/github.com/ONSdigital/dp-graph/observation/observationtest/row_reader.go @@ -5,33 +5,38 @@ package observationtest import ( "context" + "github.com/ONSdigital/dp-graph/observation" "sync" ) var ( - lockCSVRowReaderMockClose 
sync.RWMutex - lockCSVRowReaderMockRead sync.RWMutex + lockStreamRowReaderMockClose sync.RWMutex + lockStreamRowReaderMockRead sync.RWMutex ) -// CSVRowReaderMock is a mock implementation of CSVRowReader. +// Ensure, that StreamRowReaderMock does implement StreamRowReader. +// If this is not the case, regenerate this file with moq. +var _ observation.StreamRowReader = &StreamRowReaderMock{} + +// StreamRowReaderMock is a mock implementation of StreamRowReader. // -// func TestSomethingThatUsesCSVRowReader(t *testing.T) { +// func TestSomethingThatUsesStreamRowReader(t *testing.T) { // -// // make and configure a mocked CSVRowReader -// mockedCSVRowReader := &CSVRowReaderMock{ +// // make and configure a mocked StreamRowReader +// mockedStreamRowReader := &StreamRowReaderMock{ // CloseFunc: func(in1 context.Context) error { -// panic("TODO: mock out the Close method") +// panic("mock out the Close method") // }, // ReadFunc: func() (string, error) { -// panic("TODO: mock out the Read method") +// panic("mock out the Read method") // }, // } // -// // TODO: use mockedCSVRowReader in code that requires CSVRowReader -// // and then make assertions. +// // use mockedStreamRowReader in code that requires StreamRowReader +// // and then make assertions. // // } -type CSVRowReaderMock struct { +type StreamRowReaderMock struct { // CloseFunc mocks the Close method. CloseFunc func(in1 context.Context) error @@ -52,58 +57,58 @@ type CSVRowReaderMock struct { } // Close calls CloseFunc. 
-func (mock *CSVRowReaderMock) Close(in1 context.Context) error { +func (mock *StreamRowReaderMock) Close(in1 context.Context) error { if mock.CloseFunc == nil { - panic("CSVRowReaderMock.CloseFunc: method is nil but CSVRowReader.Close was just called") + panic("StreamRowReaderMock.CloseFunc: method is nil but StreamRowReader.Close was just called") } callInfo := struct { In1 context.Context }{ In1: in1, } - lockCSVRowReaderMockClose.Lock() + lockStreamRowReaderMockClose.Lock() mock.calls.Close = append(mock.calls.Close, callInfo) - lockCSVRowReaderMockClose.Unlock() + lockStreamRowReaderMockClose.Unlock() return mock.CloseFunc(in1) } // CloseCalls gets all the calls that were made to Close. // Check the length with: -// len(mockedCSVRowReader.CloseCalls()) -func (mock *CSVRowReaderMock) CloseCalls() []struct { +// len(mockedStreamRowReader.CloseCalls()) +func (mock *StreamRowReaderMock) CloseCalls() []struct { In1 context.Context } { var calls []struct { In1 context.Context } - lockCSVRowReaderMockClose.RLock() + lockStreamRowReaderMockClose.RLock() calls = mock.calls.Close - lockCSVRowReaderMockClose.RUnlock() + lockStreamRowReaderMockClose.RUnlock() return calls } // Read calls ReadFunc. -func (mock *CSVRowReaderMock) Read() (string, error) { +func (mock *StreamRowReaderMock) Read() (string, error) { if mock.ReadFunc == nil { - panic("CSVRowReaderMock.ReadFunc: method is nil but CSVRowReader.Read was just called") + panic("StreamRowReaderMock.ReadFunc: method is nil but StreamRowReader.Read was just called") } callInfo := struct { }{} - lockCSVRowReaderMockRead.Lock() + lockStreamRowReaderMockRead.Lock() mock.calls.Read = append(mock.calls.Read, callInfo) - lockCSVRowReaderMockRead.Unlock() + lockStreamRowReaderMockRead.Unlock() return mock.ReadFunc() } // ReadCalls gets all the calls that were made to Read. 
// Check the length with: -// len(mockedCSVRowReader.ReadCalls()) -func (mock *CSVRowReaderMock) ReadCalls() []struct { +// len(mockedStreamRowReader.ReadCalls()) +func (mock *StreamRowReaderMock) ReadCalls() []struct { } { var calls []struct { } - lockCSVRowReaderMockRead.RLock() + lockStreamRowReaderMockRead.RLock() calls = mock.calls.Read - lockCSVRowReaderMockRead.RUnlock() + lockStreamRowReaderMockRead.RUnlock() return calls } diff --git a/vendor/github.com/ONSdigital/dp-graph/observation/reader.go b/vendor/github.com/ONSdigital/dp-graph/observation/reader.go index 853ee611..df99d2d0 100644 --- a/vendor/github.com/ONSdigital/dp-graph/observation/reader.go +++ b/vendor/github.com/ONSdigital/dp-graph/observation/reader.go @@ -6,14 +6,14 @@ import ( "io" ) -//go:generate moq -out observationtest/row_reader.go -pkg observationtest . CSVRowReader +//go:generate moq -out observationtest/row_reader.go -pkg observationtest . StreamRowReader // Check that the reader conforms to the io.reader interface. var _ io.Reader = (*Reader)(nil) // StreamRowReader provides a reader of individual rows (lines) of a CSV. type StreamRowReader interface { - Read() (string, error) + Read() (string, error) // TODO: this should take context Close(context.Context) error } @@ -89,7 +89,7 @@ func (reader *Reader) TotalBytesRead() int64 { return reader.totalBytesRead } -// ObservationsCount returns the total number of bytes read by this reader. +// ObservationsCount returns the total number of rows read by this reader. 
func (reader *Reader) ObservationsCount() int32 { return reader.obsCount } diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/Dockerfile.gremlin b/vendor/github.com/ONSdigital/gremgo-neptune/Dockerfile.gremlin new file mode 100644 index 00000000..6216e5b0 --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/Dockerfile.gremlin @@ -0,0 +1 @@ +FROM tinkerpop/gremlin-server \ No newline at end of file diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/LICENSE.md b/vendor/github.com/ONSdigital/gremgo-neptune/LICENSE.md new file mode 100644 index 00000000..9cfe76e1 --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/LICENSE.md @@ -0,0 +1,9 @@ +Copyright for portions of project `gremgo-neptune` are held by [Marcus Engvall, 2016] as part of project `gremgo`. +Some code (marked with comments) has been taken from project [gremgoser](https://github.com/intwinelabs/gremgoser). +All other copyright for project `gremgo-neptune` are held by [Phil Schwartz, 2018] and the contributing authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/Makefile b/vendor/github.com/ONSdigital/gremgo-neptune/Makefile new file mode 100644 index 00000000..cfc07d91 --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/Makefile @@ -0,0 +1,21 @@ +.DEFAULT_GOAL:= all + +.PHONY: all +all: vet test + +.PHONY: vet +vet: + @go vet -v + +.PHONY: test +test: + @go test -v + +.PHONY: test-bench +test-bench: + @go test -bench=. -race + +.PHONY: gremlin +gremlin: + @docker build -t gremgo-neptune/gremlin-server -f ./Dockerfile.gremlin . + @docker run -p 8182:8182 -t gremgo-neptune/gremlin-server \ No newline at end of file diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/README.md b/vendor/github.com/ONSdigital/gremgo-neptune/README.md new file mode 100644 index 00000000..5e55b1b1 --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/README.md @@ -0,0 +1,41 @@ +# gremgo-neptune + +[![GoDoc](http://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/ONSdigital/gremgo-neptune) [![Build Status](https://travis-ci.org/ONSdigital/gremgo-neptune.svg?branch=master)](https://travis-ci.org/ONSdigital/gremgo-neptune) [![Go Report Card](https://goreportcard.com/badge/github.com/ONSdigital/gremgo-neptune)](https://goreportcard.com/report/github.com/ONSdigital/gremgo-neptune) + +gremgo-neptune is a fork of [qasaur/gremgo](https://github.com/qasaur/gremgo) with alterations to make it compatible with [AWS Neptune](https://aws.amazon.com/neptune/) which is a "Fast, reliable graph database built for the cloud". + +gremgo is a fast, efficient, and easy-to-use client for the TinkerPop graph database stack. 
It is a Gremlin language driver which uses WebSockets to interface with Gremlin Server and has a strong emphasis on concurrency and scalability. Please keep in mind that gremgo is still under heavy development and although effort is being made to fully cover gremgo with reliable tests, bugs may be present in several areas.
+
+**Modifications were made to `gremgo` in order to "support" AWS Neptune's lack of Gremlin-specific features, like no support for query bindings, among others. See differences in Gremlin support here: [AWS Neptune Gremlin Implementation Differences](https://docs.aws.amazon.com/neptune/latest/userguide/access-graph-gremlin-differences.html)**
+
+Installation
+==========
+```
+go get github.com/ONSdigital/gremgo-neptune
+dep ensure
+```
+
+Development
+====
+
+If you amend the `dialer` interface, please run:
+```
+go generate
+```
+
+Documentation
+==========
+
+* [GoDoc](https://godoc.org/github.com/ONSdigital/gremgo-neptune)
+
+Examples
+
+- [simple example](examples/simple/main.go)
+- [cursor example](examples/cursor/main.go)
+- [authentication example](examples/authentication/main.go)
+  - The plugin accepts authentication creating a secure dialer where credentials are set.
+    If the server needs authentication and you do not provide the credentials the client will panic.
+
+License
+==========
+See [LICENSE](LICENSE.md)
diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/TODO.md b/vendor/github.com/ONSdigital/gremgo-neptune/TODO.md
new file mode 100644
index 00000000..756e9586
--- /dev/null
+++ b/vendor/github.com/ONSdigital/gremgo-neptune/TODO.md
@@ -0,0 +1,8 @@
+# Todo list for gremgo
+
+* Add tests for connection (WebSockets etc.)
+* Timeout for response retrieval +* Fix error handling in write and read workers +* Write UUIDv4 generator to reduce reliance on external library +* Change WebSocket library from gorilla/websocket to net/websocket +* Create mock TinkerPop server for testing diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/client.go b/vendor/github.com/ONSdigital/gremgo-neptune/client.go new file mode 100644 index 00000000..f95c0eac --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/client.go @@ -0,0 +1,501 @@ +package gremgo + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "log" + "reflect" + "sync" + "time" + + "github.com/gedge/graphson" + "github.com/pkg/errors" +) + +var ( + ErrorConnectionDisposed = errors.New("you cannot write on a disposed connection") + ErrorNoGraphTags = errors.New("does not contain any graph tags") + ErrorUnsupportedPropertyType = errors.New("unsupported property map value type") +) + +// Client is a container for the gremgo client. 
+type Client struct { + conn dialer + requests chan []byte + responses chan []byte + results *sync.Map + responseNotifier *sync.Map // responseNotifier notifies the requester that a response has been completed for the request + chunkNotifier *sync.Map // chunkNotifier contains channels per requestID (if using cursors) which notifies the requester that a partial response has arrived + sync.RWMutex + Errored bool +} + +// NewDialer returns a WebSocket dialer to use when connecting to Gremlin Server +func NewDialer(host string, configs ...DialerConfig) (dialer *Ws) { + dialer = &Ws{ + timeout: 15 * time.Second, + pingInterval: 60 * time.Second, + writingWait: 15 * time.Second, + readingWait: 15 * time.Second, + connected: false, + quit: make(chan struct{}), + } + + for _, conf := range configs { + conf(dialer) + } + + dialer.host = host + return dialer +} + +func newClient() (c Client) { + c.requests = make(chan []byte, 3) // c.requests takes any request and delivers it to the WriteWorker for dispatch to Gremlin Server + c.responses = make(chan []byte, 3) // c.responses takes raw responses from ReadWorker and delivers it for sorting to handleResponse + c.results = &sync.Map{} + c.responseNotifier = &sync.Map{} + c.chunkNotifier = &sync.Map{} + return +} + +// Dial returns a gremgo client for interaction with the Gremlin Server specified in the host IP. +func Dial(conn dialer, errs chan error) (c Client, err error) { + return DialCtx(context.Background(), conn, errs) +} + +// DialCtx returns a gremgo client for interaction with the Gremlin Server specified in the host IP. 
+func DialCtx(ctx context.Context, conn dialer, errs chan error) (c Client, err error) { + c = newClient() + c.conn = conn + + // Connects to Gremlin Server + err = conn.connectCtx(ctx) + if err != nil { + return + } + + msgChan := make(chan []byte, 200) + + go c.writeWorkerCtx(ctx, errs) + go c.readWorkerCtx(ctx, msgChan, errs) + go c.saveWorkerCtx(ctx, msgChan, errs) + go conn.pingCtx(ctx, errs) + + return +} + +func (c *Client) executeRequest(query string, bindings, rebindings map[string]string) (resp []Response, err error) { + return c.executeRequestCtx(context.Background(), query, bindings, rebindings) +} +func (c *Client) executeRequestCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (resp []Response, err error) { + var req request + var id string + req, id, err = prepareRequest(query, bindings, rebindings) + if err != nil { + return + } + + msg, err := packageRequest(req) + if err != nil { + log.Println(err) + return + } + c.responseNotifier.Store(id, make(chan error, 1)) + c.dispatchRequestCtx(ctx, msg) + resp, err = c.retrieveResponseCtx(ctx, id) + if err != nil { + err = errors.Wrapf(err, "query: %s", query) + } + return +} +func (c *Client) executeRequestCursorCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (cursor *Cursor, err error) { + var req request + var id string + if req, id, err = prepareRequest(query, bindings, rebindings); err != nil { + return + } + + var msg []byte + if msg, err = packageRequest(req); err != nil { + log.Println(err) + return + } + c.responseNotifier.Store(id, make(chan error, 1)) + c.chunkNotifier.Store(id, make(chan bool, 10)) + if c.dispatchRequestCtx(ctx, msg); err != nil { + err = errors.Wrap(err, "executeRequestCursorCtx") + return + } + + cursor = &Cursor{ + ID: id, + client: c, + } + return +} + +func (c *Client) authenticate(requestID string) (err error) { + auth := c.conn.getAuth() + req, err := prepareAuthRequest(requestID, auth.username, auth.password) + 
if err != nil { + return + } + + msg, err := packageRequest(req) + if err != nil { + log.Println(err) + return + } + + c.dispatchRequest(msg) + return +} + +// Execute formats a raw Gremlin query, sends it to Gremlin Server, and returns the result. +func (c *Client) Execute(query string, bindings, rebindings map[string]string) (resp []Response, err error) { + return c.ExecuteCtx(context.Background(), query, bindings, rebindings) +} +func (c *Client) ExecuteCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (resp []Response, err error) { + if c.conn.IsDisposed() { + return resp, ErrorConnectionDisposed + } + return c.executeRequestCtx(ctx, query, bindings, rebindings) +} + +// ExecuteFile takes a file path to a Gremlin script, sends it to Gremlin Server, and returns the result. +func (c *Client) ExecuteFile(path string, bindings, rebindings map[string]string) (resp []Response, err error) { + if c.conn.IsDisposed() { + return resp, ErrorConnectionDisposed + } + d, err := ioutil.ReadFile(path) // Read script from file + if err != nil { + log.Println(err) + return + } + query := string(d) + return c.executeRequest(query, bindings, rebindings) +} + +// Get formats a raw Gremlin query, sends it to Gremlin Server, and populates the passed []interface. 
+func (c *Client) Get(query string, bindings, rebindings map[string]string) (res []graphson.Vertex, err error) { + return c.GetCtx(context.Background(), query, bindings, rebindings) +} + +// GetCtx - execute a gremlin command and return the response as vertices +func (c *Client) GetCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (res []graphson.Vertex, err error) { + if c.conn.IsDisposed() { + err = ErrorConnectionDisposed + return + } + + var resp []Response + resp, err = c.executeRequestCtx(ctx, query, bindings, rebindings) + if err != nil { + return + } + return c.deserializeResponseToVertices(resp) +} + +func (c *Client) deserializeResponseToVertices(resp []Response) (res []graphson.Vertex, err error) { + if len(resp) == 0 || resp[0].Status.Code == statusNoContent { + return + } + + for _, item := range resp { + resN, err := graphson.DeserializeListOfVerticesFromBytes(item.Result.Data) + if err != nil { + panic(err) + } + res = append(res, resN...) + } + return +} + +// OpenCursorCtx initiates a query on the database, returning a cursor used to iterate over the results as they arrive +func (c *Client) OpenCursorCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (cursor *Cursor, err error) { + if c.conn.IsDisposed() { + err = ErrorConnectionDisposed + return + } + return c.executeRequestCursorCtx(ctx, query, bindings, rebindings) +} + +// ReadCursorCtx returns the next set of results, deserialized as []Vertex, for the cursor +// - `res` may be empty when results were read by a previous call +// - `eof` will be true when no more results are available +func (c *Client) ReadCursorCtx(ctx context.Context, cursor *Cursor) (res []graphson.Vertex, eof bool, err error) { + var resp []Response + if resp, eof, err = c.retrieveNextResponseCtx(ctx, cursor); err != nil { + err = errors.Wrapf(err, "ReadCursorCtx: %s", cursor.ID) + return + } + + if res, err = c.deserializeResponseToVertices(resp); err != nil { + err = 
errors.Wrapf(err, "ReadCursorCtx: %s", cursor.ID) + return + } + return +} + +// GetE formats a raw Gremlin query, sends it to Gremlin Server, and populates the passed []interface. +func (c *Client) GetE(query string, bindings, rebindings map[string]string) (res []graphson.Edge, err error) { + return c.GetEdgeCtx(context.Background(), query, bindings, rebindings) +} +func (c *Client) GetEdgeCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (res []graphson.Edge, err error) { + if c.conn.IsDisposed() { + err = ErrorConnectionDisposed + return + } + + resp, err := c.executeRequestCtx(ctx, query, bindings, rebindings) + if err != nil { + return + } + if len(resp) == 0 || resp[0].Status.Code == statusNoContent { + return + } + + for _, item := range resp { + var resN []graphson.Edge + if resN, err = graphson.DeserializeListOfEdgesFromBytes(item.Result.Data); err != nil { + return + } + res = append(res, resN...) + } + return +} + +// GetCount returns the count element returned by an Execute() +func (c *Client) GetCount(query string, bindings, rebindings map[string]string) (i int64, err error) { + return c.GetCountCtx(context.Background(), query, bindings, rebindings) +} +func (c *Client) GetCountCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (i int64, err error) { + var res []Response + if res, err = c.ExecuteCtx(ctx, query, bindings, rebindings); err != nil { + return + } + if len(res) > 1 { + err = errors.New("GetCount: expected one result, got more than one") + return + } else if len(res) == 0 { + err = errors.New("GetCount: expected one result, got zero") + return + } + if i, err = graphson.DeserializeNumber(res[0].Result.Data); err != nil { + return + } + return +} + +// GetStringList returns the list of string elements returned by an Execute() (e.g. 
from `...().properties('p').value()`) +func (c *Client) GetStringList(query string, bindings, rebindings map[string]string) (vals []string, err error) { + return c.GetStringListCtx(context.Background(), query, bindings, rebindings) +} +func (c *Client) GetStringListCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (vals []string, err error) { + var res []Response + if res, err = c.ExecuteCtx(ctx, query, bindings, rebindings); err != nil { + return + } + for _, resN := range res { + var valsN []string + if valsN, err = graphson.DeserializeStringListFromBytes(resN.Result.Data); err != nil { + return + } + vals = append(vals, valsN...) + } + return +} + +// GetProperties returns a map of string to interface{} returned by an Execute() for vertex .properties() +func (c *Client) GetProperties(query string, bindings, rebindings map[string]string) (vals map[string][]interface{}, err error) { + return c.GetPropertiesCtx(context.Background(), query, bindings, rebindings) +} +func (c *Client) GetPropertiesCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (vals map[string][]interface{}, err error) { + var res []Response + if res, err = c.ExecuteCtx(ctx, query, bindings, rebindings); err != nil { + return + } + vals = make(map[string][]interface{}) + for _, resN := range res { + if err = graphson.DeserializePropertiesFromBytes(resN.Result.Data, vals); err != nil { + return + } + } + return +} + +// GremlinForVertex returns the addV()... and V()... gremlin commands for `data` +// Because of possible multiples, it does not start with `g.` (it probably should? 
XXX ) +// (largely taken from https://github.com/intwinelabs/gremgoser) +func GremlinForVertex(label string, data interface{}) (gremAdd, gremGet string, err error) { + gremAdd = fmt.Sprintf("addV('%s')", label) + gremGet = fmt.Sprintf("V('%s')", label) + + d := reflect.ValueOf(data) + id := d.FieldByName("Id") + if id.IsValid() { + if idField, ok := d.Type().FieldByName("Id"); ok { + tag := idField.Tag.Get("graph") + name, opts := parseTag(tag) + if len(name) == 0 && len(opts) == 0 { + gremAdd += fmt.Sprintf(".property(id,'%s')", id) + gremGet += fmt.Sprintf(".hasId('%s')", id) + } + } + } + + missingTag := true + + for i := 0; i < d.NumField(); i++ { + tag := d.Type().Field(i).Tag.Get("graph") + name, opts := parseTag(tag) + if (len(name) == 0 || name == "-") && len(opts) == 0 { + continue + } + missingTag = false + val := d.Field(i).Interface() + if len(opts) == 0 { + err = fmt.Errorf("interface field tag %q does not contain a tag option type, field type: %T", name, val) + return + } + if !d.Field(i).IsValid() { + continue + } + if opts.Contains("id") { + if val != "" { + gremAdd += fmt.Sprintf(".property(id,'%s')", val) + gremGet += fmt.Sprintf(".hasId('%s')", val) + } + } else if opts.Contains("string") { + if val != "" { + gremAdd += fmt.Sprintf(".property('%s','%s')", name, escapeStringy(val.(string))) + gremGet += fmt.Sprintf(".has('%s','%s')", name, escapeStringy(val)) + } + } else if opts.Contains("bool") || opts.Contains("number") || opts.Contains("other") { + gremAdd += fmt.Sprintf(".property('%s',%v)", name, val) + gremGet += fmt.Sprintf(".has('%s',%v)", name, val) + } else if opts.Contains("[]string") { + s := reflect.ValueOf(val) + for i := 0; i < s.Len(); i++ { + gremAdd += fmt.Sprintf(".property('%s','%s')", name, escapeStringy(s.Index(i).Interface())) + gremGet += fmt.Sprintf(".has('%s','%s')", name, escapeStringy(s.Index(i).Interface())) + } + } else if opts.Contains("[]bool") || opts.Contains("[]number") || opts.Contains("[]other") { + s 
:= reflect.ValueOf(val) + for i := 0; i < s.Len(); i++ { + gremAdd += fmt.Sprintf(".property('%s',%v)", name, s.Index(i).Interface()) + gremGet += fmt.Sprintf(".has('%s',%v)", name, s.Index(i).Interface()) + } + } else { + err = fmt.Errorf("interface field tag needs recognised option, field: %q, tag: %q", d.Type().Field(i).Name, tag) + return + } + } + + if missingTag { + // this err is effectively a warning for gremGet (can be ignored, unless no Id) + err = ErrorNoGraphTags + return + } + return +} + +// AddV takes a label and an interface and adds it as a vertex to the graph +func (c *Client) AddV(label string, data interface{}, bindings, rebindings map[string]string) (vert graphson.Vertex, err error) { + return c.AddVertexCtx(context.Background(), label, data, bindings, rebindings) +} +func (c *Client) AddVertexCtx(ctx context.Context, label string, data interface{}, bindings, rebindings map[string]string) (vert graphson.Vertex, err error) { + if c.conn.IsDisposed() { + return vert, ErrorConnectionDisposed + } + + q, _, err := GremlinForVertex(label, data) + if err != nil && err != ErrorNoGraphTags { + panic(err) // XXX + } + q = "g." 
+ q + + var resp []Response + if resp, err = c.ExecuteCtx(ctx, q, bindings, rebindings); err != nil { + return + } + + if len(resp) != 1 { + return vert, fmt.Errorf("AddV should receive 1 response, got %d", len(resp)) + } + + for _, res := range resp { // XXX one result, so should not need loop + var result []graphson.Vertex + if result, err = graphson.DeserializeListOfVerticesFromBytes(res.Result.Data); err != nil { + return + } + if len(result) != 1 { + return vert, fmt.Errorf("AddV should receive 1 vertex, got %d", len(result)) + } + + vert = result[0] + } + return +} + +// AddE takes a label, from UUID and to UUID (and optional props map) and creates an edge between the two vertex in the graph +func (c *Client) AddE(label, fromId, toId string, props map[string]interface{}) (resp interface{}, err error) { + return c.AddEdgeCtx(context.Background(), label, fromId, toId, props) +} +func (c *Client) AddEdgeCtx(ctx context.Context, label, fromId, toId string, props map[string]interface{}) (resp interface{}, err error) { + if c.conn.IsDisposed() { + return nil, ErrorConnectionDisposed + } + + var propStr string + if propStr, err = buildProps(props); err != nil { + return + } + q := fmt.Sprintf("g.addE('%s').from(g.V().hasId('%s')).to(g.V().hasId('%s'))%s", label, fromId, toId, propStr) + resp, err = c.ExecuteCtx(ctx, q, nil, nil) + return +} + +// Close closes the underlying connection and marks the client as closed. 
+func (c *Client) Close() { + if c.conn != nil { + c.conn.close() + } +} + +// buildProps converts a map[string]interfaces to be used as properties on an edge +// (largely taken from https://github.com/intwinelabs/gremgoser) +func buildProps(props map[string]interface{}) (q string, err error) { + for k, v := range props { + t := reflect.ValueOf(v).Kind() + if t == reflect.String { + q += fmt.Sprintf(".property('%s', '%s')", k, escapeStringy(v)) + } else if t == reflect.Bool || t == reflect.Int || t == reflect.Int8 || t == reflect.Int16 || t == reflect.Int32 || t == reflect.Int64 || t == reflect.Uint || t == reflect.Uint8 || t == reflect.Uint16 || t == reflect.Uint32 || t == reflect.Uint64 || t == reflect.Float32 || t == reflect.Float64 { + q += fmt.Sprintf(".property('%s', %v)", k, v) + } else if t == reflect.Slice { + s := reflect.ValueOf(v) + for i := 0; i < s.Len(); i++ { + q += fmt.Sprintf(".property('%s', '%s')", k, escapeStringy(s.Index(i).Interface())) + } + } else { + return "", ErrorUnsupportedPropertyType + } + } + return +} + +// escapeStringy takes a string and escapes some characters +// (largely taken from https://github.com/intwinelabs/gremgoser) +func escapeStringy(stringy interface{}) string { + var buf bytes.Buffer + for _, char := range stringy.(string) { + switch char { + case '\'', '"', '\\': + buf.WriteRune('\\') + } + buf.WriteRune(char) + } + return buf.String() +} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/configuration.go b/vendor/github.com/ONSdigital/gremgo-neptune/configuration.go new file mode 100644 index 00000000..8ed566d9 --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/configuration.go @@ -0,0 +1,42 @@ +package gremgo + +import "time" + +//DialerConfig is the struct for defining configuration for WebSocket dialer +type DialerConfig func(*Ws) + +//SetAuthentication sets on dialer credentials for authentication +func SetAuthentication(username string, password 
string) DialerConfig { + return func(c *Ws) { + c.auth = &auth{username: username, password: password} + } +} + +//SetTimeout sets the dial timeout +func SetTimeout(seconds int) DialerConfig { + return func(c *Ws) { + c.timeout = time.Duration(seconds) * time.Second + } +} + +//SetPingInterval sets the interval of ping sending for know is +//connection is alive and in consequence the client is connected +func SetPingInterval(seconds int) DialerConfig { + return func(c *Ws) { + c.pingInterval = time.Duration(seconds) * time.Second + } +} + +//SetWritingWait sets the time for waiting that writing occur +func SetWritingWait(seconds int) DialerConfig { + return func(c *Ws) { + c.writingWait = time.Duration(seconds) * time.Second + } +} + +//SetReadingWait sets the time for waiting that reading occur +func SetReadingWait(seconds int) DialerConfig { + return func(c *Ws) { + c.readingWait = time.Duration(seconds) * time.Second + } +} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/connection.go b/vendor/github.com/ONSdigital/gremgo-neptune/connection.go new file mode 100644 index 00000000..d2d16ca4 --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/connection.go @@ -0,0 +1,270 @@ +package gremgo + +import ( + "context" + "net/http" + "sync" + "time" + + "github.com/gorilla/websocket" + "github.com/pkg/errors" +) + +//go:generate moq -out dialer_moq_test.go . 
dialer + +type dialer interface { + connect() error + connectCtx(context.Context) error + IsConnected() bool + IsDisposed() bool + write([]byte) error + read() (int, []byte, error) + readCtx(context.Context, chan message) + close() error + getAuth() *auth + ping(errs chan error) + pingCtx(context.Context, chan error) +} + +///// +/* +WebSocket Connection +*/ +///// + +// Ws is the dialer for a WebSocket connection +type Ws struct { + host string + conn *websocket.Conn + auth *auth + disposed bool + connected bool + pingInterval time.Duration + writingWait time.Duration + readingWait time.Duration + timeout time.Duration + quit chan struct{} + sync.RWMutex +} + +//Auth is the container for authentication data of dialer +type auth struct { + username string + password string +} + +func (ws *Ws) connect() (err error) { + return ws.connectCtx(context.Background()) +} + +func (ws *Ws) connectCtx(ctx context.Context) (err error) { + d := websocket.Dialer{ + WriteBufferSize: 512 * 1024, + ReadBufferSize: 512 * 1024, + HandshakeTimeout: 5 * time.Second, // Timeout or else we'll hang forever and never fail on bad hosts. + } + ws.conn, _, err = d.DialContext(ctx, ws.host, http.Header{}) + if err != nil { + return + } + ws.connected = true + ws.conn.SetPongHandler(func(appData string) error { + ws.Lock() + ws.connected = true + ws.Unlock() + return nil + }) + return +} + +// IsConnected returns whether the underlying websocket is connected +func (ws *Ws) IsConnected() bool { + return ws.connected +} + +// IsDisposed returns whether the underlying websocket is disposed +func (ws *Ws) IsDisposed() bool { + return ws.disposed +} + +func (ws *Ws) write(msg []byte) (err error) { + // XXX want to do locking here? + // ws.RWMutex.Lock() + // defer ws.RWMutex.Unlock() + err = ws.conn.WriteMessage(websocket.BinaryMessage, msg) + return +} + +func (ws *Ws) read() (msgType int, msg []byte, err error) { + // XXX want to do locking here? 
+ // ws.RWMutex.RLock() + // defer ws.RWMutex.RUnlock() + msgType, msg, err = ws.conn.ReadMessage() + return +} + +func (ws *Ws) readCtx(ctx context.Context, rxMsgChan chan message) { + // XXX want to do locking here? + // ws.RWMutex.RLock() + // defer ws.RWMutex.RUnlock() + for { + select { + case <-ctx.Done(): + return + default: + msgType, msg, err := ws.conn.ReadMessage() + rxMsgChan <- message{msgType, msg, err} + if msgType == -1 { + return + } + } + } +} + +func (ws *Ws) close() (err error) { + defer func() { + close(ws.quit) + ws.conn.Close() + ws.disposed = true + }() + + // XXX want to do locking here? + // ws.RWMutex.Lock() + // defer ws.RWMutex.Unlock() + err = ws.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) //Cleanly close the connection with the server + return +} + +func (ws *Ws) getAuth() *auth { + if ws.auth == nil { + panic("You must create a Secure Dialer for authenticating with the server") + } + return ws.auth +} + +func (ws *Ws) ping(errs chan error) { + ws.pingCtx(context.Background(), errs) +} + +func (ws *Ws) pingCtx(ctx context.Context, errs chan error) { + ticker := time.NewTicker(ws.pingInterval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + connected := true + if err := ws.conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(ws.writingWait)); err != nil { + errs <- err + connected = false + } + ws.Lock() + ws.connected = connected + ws.Unlock() + case <-ctx.Done(): + return + case <-ws.quit: + return + } + } +} + +// writeWorker works on a loop and dispatches messages as soon as it receives them +func (c *Client) writeWorker(errs chan error, quit chan struct{}) { + for { + select { + case msg := <-c.requests: + c.Lock() + err := c.conn.write(msg) + if err != nil { + errs <- err + c.Errored = true + c.Unlock() + break + } + c.Unlock() + + case <-quit: + return + } + } +} + +// writeWorkerCtx works on a loop and dispatches messages as soon as it 
receives them +func (c *Client) writeWorkerCtx(ctx context.Context, errs chan error) { + for { + select { + case msg := <-c.requests: + c.Lock() + err := c.conn.write(msg) + if err != nil { + errs <- err + c.Errored = true + c.Unlock() + break + } + c.Unlock() + + case <-ctx.Done(): + return + } + } +} + +func (c *Client) readWorker(errs chan error, quit chan struct{}) { // readWorker works on a loop and sorts messages as soon as it receives them + for { + msgType, msg, err := c.conn.read() + if msgType == -1 { // msgType == -1 is noFrame (close connection) + return + } + if err != nil { + errs <- errors.Wrapf(err, "Receive message type: %d", msgType) + c.Errored = true + break + } + if msg != nil { + if err = c.handleResponse(msg); err != nil { + // XXX this makes the err fatal + errs <- errors.Wrapf(err, "handleResponse fail: %q", msg) + c.Errored = true + } + } + + select { + case <-quit: + return + default: + continue + } + } +} + +type message struct { + mType int + msg []byte + err error +} + +// readWorkerCtx works on a loop and sorts read messages as soon as it receives them +func (c *Client) readWorkerCtx(ctx context.Context, msgs chan []byte, errs chan error) { + receivedMsgChan := make(chan message, 100) + go c.conn.readCtx(ctx, receivedMsgChan) + + for i := 0; ; i++ { + select { + case <-ctx.Done(): + return + case msg := <-receivedMsgChan: + if msg.mType == -1 { // msgType == -1 is noFrame (close connection) + return + } + if msg.err != nil { + errs <- errors.Wrapf(msg.err, "Receive message type: %d", msg.mType) + c.Errored = true + return + } + if msg.msg != nil { + msgs <- msg.msg + } + } + } +} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/cursor.go b/vendor/github.com/ONSdigital/gremgo-neptune/cursor.go new file mode 100644 index 00000000..5351bc74 --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/cursor.go @@ -0,0 +1,85 @@ +package gremgo + +import ( + "context" + "io" + "sync" + + 
"github.com/davecgh/go-spew/spew" + "github.com/gedge/graphson" + "github.com/pkg/errors" +) + +// Cursor allows for results to be iterated over as soon as available, rather than waiting for +// a query to complete and all results to be returned in one block. +type Cursor struct { + ID string + mu sync.RWMutex + eof bool + buffer []string + client *Client +} + +// Read a string response from the cursor, reading from the buffer of previously retrieved responses +// when possible. When the buffer is empty, Read uses the cursor's client to retrieve further +// responses from the database. As this function does not take context, a number of attempts +// is hardcoded in refillBuffer() to prevent an infinite wait for further responses. +func (c *Cursor) Read() (string, error) { + if len(c.buffer) == 0 { + if c.eof { + return "", io.EOF + } + + if err := c.refillBuffer(); err != nil { + return "", err + } + } + + s := c.buffer[0] + "\n" + spew.Dump("cursor string: " + s) + + if len(c.buffer) > 1 { + c.buffer = c.buffer[1:] + } else { + c.buffer = []string{} + } + + return s, nil + +} + +func (c *Cursor) refillBuffer() error { + var resp []Response + var err error + + var attempts int + for resp == nil && !c.eof || attempts > 5 { //resp could be empty if reading too quickly + attempts++ + if resp, c.eof, err = c.client.retrieveNextResponseCtx(context.Background(), c); err != nil { + err = errors.Wrapf(err, "cursor.Read: %s", c.ID) + return err + } + } + + //gremlin has returned a validly formed 'no content' response + if len(resp) == 1 && &resp[0].Status != nil && resp[0].Status.Code == 204 { + return io.ErrUnexpectedEOF + } + + if c.buffer, err = graphson.DeserializeStringListFromBytes(resp[0].Result.Data); err != nil { + return err + } + + if len(c.buffer) == 0 { + return errors.New("no results deserialized") + } + + return nil +} + +// Close satisfies the ReadCloser interface. 
The cursor does not need to close any +// resources, as the contained client holds the connection, and this is closed +// by the defered close in OpenCursorCtx +func (c *Cursor) Close(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/go.mod b/vendor/github.com/ONSdigital/gremgo-neptune/go.mod new file mode 100644 index 00000000..a9d49683 --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/go.mod @@ -0,0 +1,9 @@ +module github.com/ONSdigital/gremgo-neptune + +require ( + github.com/gedge/graphson v0.0.0-20190531092426-d39cb8fe4384 + github.com/gofrs/uuid v3.2.0+incompatible + github.com/gorilla/websocket v1.4.0 + github.com/pkg/errors v0.8.1 + github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a +) diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/go.sum b/vendor/github.com/ONSdigital/gremgo-neptune/go.sum new file mode 100644 index 00000000..a672d64d --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/go.sum @@ -0,0 +1,21 @@ +github.com/gedge/graphson v0.0.0-20190531092426-d39cb8fe4384 h1:WnFZkCrqH8PJFxQtp3EG0GKcEneNwqS3hzYDr6d7ctE= +github.com/gedge/graphson v0.0.0-20190531092426-d39cb8fe4384/go.mod h1:Ehgz7wAEVmSkFMIY2WFsi33IZXvzrgBVsro51AEIkq0= +github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/pool.go b/vendor/github.com/ONSdigital/gremgo-neptune/pool.go new file mode 100644 index 00000000..74073257 --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/pool.go @@ -0,0 +1,524 @@ +package gremgo + +import ( + "context" + "io/ioutil" + "log" + "sync" + "time" + + "github.com/gedge/graphson" + "github.com/pkg/errors" +) + +const connRequestQueueSize = 1000000 + +// errors +var ( + ErrGraphDBClosed = 
errors.New("graphdb is closed") + ErrBadConn = errors.New("bad conn") +) + +// Pool maintains a list of connections. +type Pool struct { + MaxOpen int + MaxLifetime time.Duration + dial func() (*Client, error) + mu sync.Mutex + freeConns []*conn + open int + openerCh chan struct{} + connRequests map[uint64]chan connRequest + nextRequest uint64 + cleanerCh chan struct{} + closed bool +} + +// NewPool create ConnectionPool +func NewPool(dial func() (*Client, error)) *Pool { + p := new(Pool) + p.dial = dial + p.openerCh = make(chan struct{}, connRequestQueueSize) + p.connRequests = make(map[uint64]chan connRequest) + + go p.opener() + + return p +} + +// NewPoolWithDialerCtx returns a NewPool that uses a contextual dialer to dbURL, +// errs is a chan that receives any errors from the ping/read/write workers for the connection +func NewPoolWithDialerCtx(ctx context.Context, dbURL string, errs chan error, cfgs ...DialerConfig) *Pool { + dialFunc := func() (*Client, error) { + dialer := NewDialer(dbURL, cfgs...) + cli, err := DialCtx(ctx, dialer, errs) + return &cli, err + } + return NewPool(dialFunc) +} + +type connRequest struct { + *conn + err error +} + +// conn represents a shared and reusable connection. +type conn struct { + Pool *Pool + Client *Client + t time.Time +} + +// maybeOpenNewConnections initiates new connections if capacity allows (must be locked) +func (p *Pool) maybeOpenNewConnections() { + if p.closed { + return + } + numRequests := len(p.connRequests) + if p.MaxOpen > 0 { + numCanOpen := p.MaxOpen - p.open + if numRequests > numCanOpen { + numRequests = numCanOpen + } + } + for numRequests > 0 { + p.open++ + numRequests-- + p.openerCh <- struct{}{} + } +} + +func (p *Pool) opener() { + for range p.openerCh { + if err := p.openNewConnection(); err != nil { + // gutil.WarnLev(1, "failed opener "+err.Error()) XXX + } + } +} + +type so struct { + tryOpening bool + alreadyLocked bool + conn *conn +} + +// subtractOpen reduces p.open (count), unlocks. 
Optionally: locks, maybeOpenNewConnections, conn.Client.Close +func (p *Pool) subtractOpen(opts so, err error) error { + if !opts.alreadyLocked { + p.mu.Lock() + } + p.open-- + if opts.tryOpening { + p.maybeOpenNewConnections() + } + p.mu.Unlock() + if opts.conn != nil { + opts.conn.Client.Close() + } + return err +} + +func (p *Pool) openNewConnection() (err error) { + if p.closed { + return p.subtractOpen(so{}, errors.Errorf("failed to openNewConnection - pool closed")) + } + var c *Client + c, err = p.dial() + if err != nil { + return p.subtractOpen(so{tryOpening: true}, errors.Wrapf(err, "failed to openNewConnection - dial")) + } + cn := &conn{ + Pool: p, + Client: c, + t: time.Now(), + } + p.mu.Lock() + if !p.putConnLocked(cn, nil) { + return p.subtractOpen(so{alreadyLocked: true, conn: cn}, errors.Errorf("failed to openNewConnection - connLocked")) + } + p.mu.Unlock() + return +} + +// putConn releases a connection back to the connection pool. +func (p *Pool) putConn(cn *conn, err error) error { + p.mu.Lock() + if !p.putConnLocked(cn, err) { + return p.subtractOpen(so{alreadyLocked: true, conn: cn}, err) + } + p.mu.Unlock() + return err +} + +// putConnLocked releases a connection back to the connection pool (must be locked) +// returns false when unable to do so (pool is closed, open is at max) +func (p *Pool) putConnLocked(cn *conn, err error) bool { + if p.closed { + return false + } + if p.MaxOpen > 0 && p.open >= p.MaxOpen { + return false + } + if len(p.connRequests) > 0 { + var req chan connRequest + var reqKey uint64 + for reqKey, req = range p.connRequests { + break + } + delete(p.connRequests, reqKey) + req <- connRequest{ + conn: cn, + err: err, + } + } else { + p.freeConns = append(p.freeConns, cn) + p.startCleanerLocked() + } + return true +} + +// conn will return an available pooled connection. Either an idle connection or +// by dialing a new one if the pool does not currently have a maximum number +// of active connections. 
+func (p *Pool) conn() (*conn, error) { + ctx := context.Background() + return p.connCtx(ctx) +} +func (p *Pool) connCtx(ctx context.Context) (*conn, error) { + cn, err := p._conn(ctx, true) + if err == nil { + return cn, nil + } + if errors.Cause(err) == ErrBadConn { + return p._conn(ctx, false) + } + return cn, err +} + +func (p *Pool) _conn(ctx context.Context, useFreeConn bool) (*conn, error) { + p.mu.Lock() + if p.closed { + p.mu.Unlock() + return nil, ErrGraphDBClosed + } + // Check if the context is expired. + select { + default: + case <-ctx.Done(): + p.mu.Unlock() + return nil, errors.Wrap(ctx.Err(), "the context is expired") + } + + var pc *conn + numFree := len(p.freeConns) + if useFreeConn && numFree > 0 { + pc = p.freeConns[0] + copy(p.freeConns, p.freeConns[1:]) + p.freeConns = p.freeConns[:numFree-1] + p.mu.Unlock() + if pc.expired(p.MaxLifetime) { + return nil, p.subtractOpen(so{conn: pc}, ErrBadConn) + } + return pc, nil + } + + if p.MaxOpen > 0 && p.MaxOpen <= p.open { + req := make(chan connRequest, 1) + reqKey := p.nextRequest + p.nextRequest++ + p.connRequests[reqKey] = req + p.mu.Unlock() + + select { + // timeout + case <-ctx.Done(): + // Remove the connection request and ensure no value has been sent + // on it after removing. 
+ p.mu.Lock() + delete(p.connRequests, reqKey) + p.mu.Unlock() + select { + case ret, ok := <-req: + if ok { + p.putConn(ret.conn, ret.err) + } + default: + } + return nil, errors.Wrap(ctx.Err(), "Deadline of connRequests exceeded") + case ret, ok := <-req: + if !ok { + return nil, ErrGraphDBClosed + } + if ret.err != nil { + return ret.conn, errors.Wrap(ret.err, "Response has an error") + } + return ret.conn, nil + } + } + + p.open++ + p.mu.Unlock() + newCn, err := p.dial() + if err != nil { + return nil, p.subtractOpen(so{tryOpening: true}, errors.Wrap(err, "Failed newConn")) + } + return &conn{ + Pool: p, + Client: newCn, + t: time.Now(), + }, nil +} + +func (p *Pool) needStartCleaner() bool { + return p.MaxLifetime > 0 && + p.open > 0 && + p.cleanerCh == nil +} + +// startCleanerLocked starts connectionCleaner if needed. +func (p *Pool) startCleanerLocked() { + if p.needStartCleaner() { + p.cleanerCh = make(chan struct{}, 1) + go p.connectionCleaner() + } +} + +func (p *Pool) connectionCleaner() { + const minInterval = time.Second + + d := p.MaxLifetime + if d < minInterval { + d = minInterval + } + t := time.NewTimer(d) + + for { + select { + case <-t.C: + case <-p.cleanerCh: // dbclient was closed. + } + + ml := p.MaxLifetime + p.mu.Lock() + if p.closed || len(p.freeConns) == 0 || ml <= 0 { + p.cleanerCh = nil + p.mu.Unlock() + return + } + n := time.Now() + mlExpiredSince := n.Add(-ml) + var closing []*conn + for i := 0; i < len(p.freeConns); i++ { + pc := p.freeConns[i] + if (ml > 0 && pc.t.Before(mlExpiredSince)) || + pc.Client.Errored { + p.open-- + closing = append(closing, pc) + last := len(p.freeConns) - 1 + p.freeConns[i] = p.freeConns[last] + p.freeConns[last] = nil + p.freeConns = p.freeConns[:last] + i-- + } + } + p.mu.Unlock() + + for _, pc := range closing { + if pc.Client != nil { + pc.Client.Close() + } + } + + t.Reset(d) + } +} + +// Execute formats a raw Gremlin query, sends it to Gremlin Server, and returns the result. 
+func (p *Pool) Execute(query string, bindings, rebindings map[string]string) (resp []Response, err error) { + return p.ExecuteCtx(context.Background(), query, bindings, rebindings) +} +func (p *Pool) ExecuteCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (resp []Response, err error) { + pc, err := p.conn() + if err != nil { + return resp, errors.Wrap(err, "Failed p.conn") + } + defer func() { + p.putConn(pc, err) + }() + resp, err = pc.Client.executeRequestCtx(ctx, query, bindings, rebindings) + return +} + +// ExecuteFile takes a file path to a Gremlin script, sends it to Gremlin Server, and returns the result. +func (p *Pool) ExecuteFile(path string, bindings, rebindings map[string]string) (resp []Response, err error) { + pc, err := p.conn() + if err != nil { + return resp, errors.Wrap(err, "Failed p.conn") + } + defer func() { + p.putConn(pc, err) + }() + d, err := ioutil.ReadFile(path) // Read script from file + if err != nil { + log.Println(err) + return + } + query := string(d) + resp, err = pc.Client.executeRequest(query, bindings, rebindings) + return +} + +// AddV +func (p *Pool) AddV(label string, i interface{}, bindings, rebindings map[string]string) (resp graphson.Vertex, err error) { + return p.AddVertexCtx(context.Background(), label, i, bindings, rebindings) +} +func (p *Pool) AddVertexCtx(ctx context.Context, label string, i interface{}, bindings, rebindings map[string]string) (resp graphson.Vertex, err error) { + var pc *conn + if pc, err = p.conn(); err != nil { + return resp, errors.Wrap(err, "Failed p.conn") + } + defer p.putConn(pc, err) + return pc.Client.AddVertexCtx(ctx, label, i, bindings, rebindings) +} + +// Get +func (p *Pool) Get(query string, bindings, rebindings map[string]string) (resp interface{}, err error) { + var pc *conn + if pc, err = p.conn(); err != nil { + return resp, errors.Wrap(err, "Failed p.conn") + } + defer p.putConn(pc, err) + return pc.Client.Get(query, bindings, rebindings) +} + +// 
GetCtx +func (p *Pool) GetCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (resp interface{}, err error) { + var pc *conn + if pc, err = p.connCtx(ctx); err != nil { + return resp, errors.Wrap(err, "GetCtx: Failed p.connCtx") + } + defer p.putConn(pc, err) + return pc.Client.GetCtx(ctx, query, bindings, rebindings) +} + +// OpenCursorCtx initiates a query on the database, returning a cursor to iterate over the results +func (p *Pool) OpenCursorCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (cursor *Cursor, err error) { + var pc *conn + if pc, err = p.connCtx(ctx); err != nil { + err = errors.Wrap(err, "GetCursorCtx: Failed p.connCtx") + return + } + defer p.putConn(pc, err) + return pc.Client.OpenCursorCtx(ctx, query, bindings, rebindings) +} + +// ReadCursorCtx returns the next set of results for the cursor +// - `res` returns vertices (and may be empty when results were read by a previous call - this is normal) +// - `eof` will be true when no more results are available (`res` may still have results) +func (p *Pool) ReadCursorCtx(ctx context.Context, cursor *Cursor) (res []graphson.Vertex, eof bool, err error) { + var pc *conn + if pc, err = p.connCtx(ctx); err != nil { + err = errors.Wrap(err, "NextCtx: Failed p.connCtx") + return + } + defer p.putConn(pc, err) + return pc.Client.ReadCursorCtx(ctx, cursor) +} + +// AddE +func (p *Pool) AddE(label, fromId, toId string, props map[string]interface{}) (resp interface{}, err error) { + return p.AddEdgeCtx(context.Background(), label, fromId, toId, props) +} + +func (p *Pool) AddEdgeCtx(ctx context.Context, label, fromId, toId string, props map[string]interface{}) (resp interface{}, err error) { + // AddEdgeCtx + var pc *conn + if pc, err = p.conn(); err != nil { + return resp, errors.Wrap(err, "Failed p.conn") + } + defer p.putConn(pc, err) + return pc.Client.AddEdgeCtx(ctx, label, fromId, toId, props) +} + +// GetE +func (p *Pool) GetE(q string, bindings, 
rebindings map[string]string) (resp interface{}, err error) { + return p.GetEdgeCtx(context.Background(), q, bindings, rebindings) +} + +func (p *Pool) GetEdgeCtx(ctx context.Context, q string, bindings, rebindings map[string]string) (resp interface{}, err error) { + var pc *conn + if pc, err = p.conn(); err != nil { + return resp, errors.Wrap(err, "Failed p.conn") + } + defer p.putConn(pc, err) + return pc.Client.GetEdgeCtx(ctx, q, bindings, rebindings) +} + +func (p *Pool) GetCount(q string, bindings, rebindings map[string]string) (i int64, err error) { + return p.GetCountCtx(context.Background(), q, bindings, rebindings) +} +func (p *Pool) GetCountCtx(ctx context.Context, q string, bindings, rebindings map[string]string) (i int64, err error) { + var pc *conn + if pc, err = p.conn(); err != nil { + return 0, errors.Wrap(err, "Failed p.conn") + } + defer p.putConn(pc, err) + return pc.Client.GetCountCtx(ctx, q, bindings, rebindings) +} + +func (p *Pool) GetStringList(q string, bindings, rebindings map[string]string) (vals []string, err error) { + return p.GetStringListCtx(context.Background(), q, bindings, rebindings) +} +func (p *Pool) GetStringListCtx(ctx context.Context, q string, bindings, rebindings map[string]string) (vals []string, err error) { + var pc *conn + if pc, err = p.conn(); err != nil { + err = errors.Wrap(err, "GetStringListCtx: Failed p.conn") + return + } + defer p.putConn(pc, err) + return pc.Client.GetStringListCtx(ctx, q, bindings, rebindings) +} + +// GetProperties returns a map of vertex properties +func (p *Pool) GetProperties(q string, bindings, rebindings map[string]string) (vals map[string][]interface{}, err error) { + return p.GetPropertiesCtx(context.Background(), q, bindings, rebindings) +} +func (p *Pool) GetPropertiesCtx(ctx context.Context, q string, bindings, rebindings map[string]string) (vals map[string][]interface{}, err error) { + var pc *conn + if pc, err = p.conn(); err != nil { + err = errors.Wrap(err, "GetPropertiesCtx: 
Failed p.conn") + return + } + defer p.putConn(pc, err) + return pc.Client.GetPropertiesCtx(ctx, q, bindings, rebindings) +} + +// Close closes the pool. +func (p *Pool) Close() { + p.mu.Lock() + + close(p.openerCh) + if p.cleanerCh != nil { + close(p.cleanerCh) + } + for _, cr := range p.connRequests { + close(cr) + } + p.closed = true + p.mu.Unlock() + for _, pc := range p.freeConns { + if pc.Client != nil { + pc.Client.Close() + } + } + p.mu.Lock() + p.freeConns = nil + p.mu.Unlock() +} + +func (cn *conn) expired(timeout time.Duration) bool { + if timeout <= 0 { + return false + } + return cn.t.Add(timeout).Before(time.Now()) +} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/request.go b/vendor/github.com/ONSdigital/gremgo-neptune/request.go new file mode 100644 index 00000000..4718bb2c --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/request.go @@ -0,0 +1,96 @@ +package gremgo + +import ( + "context" + "encoding/base64" + "encoding/json" + + "github.com/gofrs/uuid" +) + +const mimeTypeStr = "application/vnd.gremlin-v3.0+json" + +// create the header as []byte with the length byte as prefix +var mimeTypePrefix = append([]byte{byte(len(mimeTypeStr))}, []byte(mimeTypeStr)...) + +type requester interface { + prepare() error + getID() string + getRequest() request +} + +// request is a container for all evaluation request parameters to be sent to the Gremlin Server. 
+type request struct { + RequestID string `json:"requestId"` + Op string `json:"op"` + Processor string `json:"processor"` + Args map[string]interface{} `json:"args"` +} + +// prepareRequest packages a query and binding into the format that Gremlin Server accepts +func prepareRequest(query string, bindings, rebindings map[string]string) (req request, id string, err error) { + var uuID uuid.UUID + if uuID, err = uuid.NewV4(); err != nil { + return + } + id = uuID.String() + + req.RequestID = id + req.Op = "eval" + req.Processor = "" + + req.Args = make(map[string]interface{}) + req.Args["language"] = "gremlin-groovy" + req.Args["gremlin"] = query + if len(bindings) > 0 || len(rebindings) > 0 { + req.Args["bindings"] = bindings + req.Args["rebindings"] = rebindings + } + + return +} + +//prepareAuthRequest creates a ws request for Gremlin Server +func prepareAuthRequest(requestID string, username string, password string) (req request, err error) { + req.RequestID = requestID + req.Op = "authentication" + req.Processor = "trasversal" + + var simpleAuth []byte + user := []byte(username) + pass := []byte(password) + + simpleAuth = append(simpleAuth, 0) + simpleAuth = append(simpleAuth, user...) + simpleAuth = append(simpleAuth, 0) + simpleAuth = append(simpleAuth, pass...) 
+ + req.Args = make(map[string]interface{}) + req.Args["sasl"] = base64.StdEncoding.EncodeToString(simpleAuth) + + return +} + +// packageRequest takes a request type and formats it into being able to be delivered to Gremlin Server +func packageRequest(req request) (msg []byte, err error) { + j, err := json.Marshal(req) // Formats request into byte format + if err != nil { + return + } + return append(mimeTypePrefix, j...), nil +} + +// dispatchRequest sends the request for writing to the remote Gremlin Server +func (c *Client) dispatchRequest(msg []byte) { + c.requests <- msg +} + +// dispatchRequestCtx sends the request for writing to the remote Gremlin Server +func (c *Client) dispatchRequestCtx(ctx context.Context, msg []byte) (err error) { + select { + case c.requests <- msg: + case <-ctx.Done(): + err = ctx.Err() + } + return +} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/response.go b/vendor/github.com/ONSdigital/gremgo-neptune/response.go new file mode 100644 index 00000000..3e307554 --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/response.go @@ -0,0 +1,231 @@ +package gremgo + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/pkg/errors" +) + +const ( + statusSuccess = 200 + statusNoContent = 204 + statusPartialContent = 206 + statusUnauthorized = 401 + statusAuthenticate = 407 + statusMalformedRequest = 498 + statusInvalidRequestArguments = 499 + statusServerError = 500 + statusScriptEvaluationError = 597 + statusServerTimeout = 598 + statusServerSerializationError = 599 +) + +// Status struct is used to hold properties returned from requests to the gremlin server +type Status struct { + Message string `json:"message"` + Code int `json:"code"` + Attributes map[string]interface{} `json:"attributes"` +} + +// Result struct is used to hold properties returned for results from requests to the gremlin server +type Result struct { + // Query Response Data + Data json.RawMessage 
`json:"data"` + Meta map[string]interface{} `json:"meta"` +} + +// Response structs holds the entire response from requests to the gremlin server +type Response struct { + RequestID string `json:"requestId"` + Status Status `json:"status"` + Result Result `json:"result"` +} + +// ToString returns a string representation of the Response struct +func (r Response) ToString() string { + return fmt.Sprintf("Response \nRequestID: %v, \nStatus: {%#v}, \nResult: {%#v}\n", r.RequestID, r.Status, r.Result) +} + +func (c *Client) saveWorkerCtx(ctx context.Context, msgChan chan []byte, errs chan error) { + for { + select { + case msg := <-msgChan: + if err := c.handleResponse(msg); err != nil { + errs <- errors.Wrapf(err, "saveWorkerCtx: handleResponse error") + } + case <-ctx.Done(): + return + } + } +} + +func (c *Client) handleResponse(msg []byte) (err error) { + var resp Response + resp, err = marshalResponse(msg) + if resp.Status.Code == statusAuthenticate { //Server request authentication + return c.authenticate(resp.RequestID) + } + c.saveResponse(resp, err) + return +} + +// marshalResponse creates a response struct for every incoming response for further manipulation +func marshalResponse(msg []byte) (resp Response, err error) { + err = json.Unmarshal(msg, &resp) + if err != nil { + return + } + + err = resp.detectError() + return +} + +// saveResponse makes the response (and its err) available for retrieval by the requester. +// Mutexes are used for thread safety. 
+func (c *Client) saveResponse(resp Response, err error) { + c.Lock() + defer c.Unlock() + var newdata []interface{} + existingData, ok := c.results.Load(resp.RequestID) // Retrieve old data container (for requests with multiple responses) + if ok { + newdata = append(existingData.([]interface{}), resp) // Create new data container with new data + existingData = nil + } else { + newdata = append(newdata, resp) + } + c.results.Store(resp.RequestID, newdata) // Add new data to buffer for future retrieval + respNotifier, _ := c.responseNotifier.LoadOrStore(resp.RequestID, make(chan error, 1)) + // err is from marshalResponse (json.Unmarshal), but is ignored when Code==statusPartialContent + if resp.Status.Code == statusPartialContent { + if chunkNotifier, ok := c.chunkNotifier.Load(resp.RequestID); ok { + chunkNotifier.(chan bool) <- true + } + } else { + respNotifier.(chan error) <- err + } +} + +// retrieveResponse retrieves the response saved by saveResponse. +func (c *Client) retrieveResponse(id string) (data []Response, err error) { + resp, _ := c.responseNotifier.Load(id) + if err = <-resp.(chan error); err == nil { + data = c.getCurrentResults(id) + } + c.cleanResults(id, resp.(chan error), nil) + return +} + +func (c *Client) getCurrentResults(id string) (data []Response) { + dataI, ok := c.results.Load(id) + if !ok { + return + } + d := dataI.([]interface{}) + dataI = nil + data = make([]Response, len(d)) + if len(d) == 0 { + return + } + for i := range d { + data[i] = d[i].(Response) + } + return +} + +func (c *Client) cleanResults(id string, respNotifier chan error, chunkNotifier chan bool) { + if respNotifier == nil { + return + } + c.responseNotifier.Delete(id) + close(respNotifier) + if chunkNotifier != nil { + close(chunkNotifier) + c.chunkNotifier.Delete(id) + } + c.deleteResponse(id) +} + +// retrieveResponseCtx retrieves the response saved by saveResponse. 
+func (c *Client) retrieveResponseCtx(ctx context.Context, id string) (data []Response, err error) { + respNotifier, _ := c.responseNotifier.Load(id) + select { + case err = <-respNotifier.(chan error): + defer c.cleanResults(id, respNotifier.(chan error), nil) + if err != nil { + return + } + data = c.getCurrentResults(id) + case <-ctx.Done(): + err = ctx.Err() + } + return +} + +// retrieveNextResponseCtx retrieves the current response (may be empty!) saved by saveResponse, +// `done` is true when the results are complete (eof) +func (c *Client) retrieveNextResponseCtx(ctx context.Context, cursor *Cursor) (data []Response, done bool, err error) { + c.Lock() + respNotifier, ok := c.responseNotifier.Load(cursor.ID) + c.Unlock() + if respNotifier == nil || !ok { + return + } + + var chunkNotifier chan bool + if chunkNotifierInterface, ok := c.chunkNotifier.Load(cursor.ID); ok { + chunkNotifier = chunkNotifierInterface.(chan bool) + } + + select { + case err = <-respNotifier.(chan error): + defer c.cleanResults(cursor.ID, respNotifier.(chan error), chunkNotifier) + if err != nil { + return + } + data = c.getCurrentResults(cursor.ID) + done = true + case <-chunkNotifier: + c.Lock() + data = c.getCurrentResults(cursor.ID) + c.deleteResponse(cursor.ID) + c.Unlock() + case <-ctx.Done(): + err = ctx.Err() + } + + return +} + +// deleteResponse deletes the response from the container. Used for cleanup purposes by requester. 
+func (c *Client) deleteResponse(id string) { + c.results.Delete(id) + return +} + +// detectError detects any possible errors in responses from Gremlin Server and generates an error for each code +func (r *Response) detectError() (err error) { + switch r.Status.Code { + case statusSuccess, statusNoContent, statusPartialContent: + case statusUnauthorized: + err = fmt.Errorf("UNAUTHORIZED - Response Message: %s", r.Status.Message) + case statusAuthenticate: + err = fmt.Errorf("AUTHENTICATE - Response Message: %s", r.Status.Message) + case statusMalformedRequest: + err = fmt.Errorf("MALFORMED REQUEST - Response Message: %s", r.Status.Message) + case statusInvalidRequestArguments: + err = fmt.Errorf("INVALID REQUEST ARGUMENTS - Response Message: %s", r.Status.Message) + case statusServerError: + err = fmt.Errorf("SERVER ERROR - Response Message: %s", r.Status.Message) + case statusScriptEvaluationError: + err = fmt.Errorf("SCRIPT EVALUATION ERROR - Response Message: %s", r.Status.Message) + case statusServerTimeout: + err = fmt.Errorf("SERVER TIMEOUT - Response Message: %s", r.Status.Message) + case statusServerSerializationError: + err = fmt.Errorf("SERVER SERIALIZATION ERROR - Response Message: %s", r.Status.Message) + default: + err = fmt.Errorf("UNKNOWN ERROR - Response Message: %s", r.Status.Message) + } + return +} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/tags.go b/vendor/github.com/ONSdigital/gremgo-neptune/tags.go new file mode 100644 index 00000000..8339d39a --- /dev/null +++ b/vendor/github.com/ONSdigital/gremgo-neptune/tags.go @@ -0,0 +1,42 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gremgo + +import "strings" + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. 
+type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/gedge/graphson/deserialize.go b/vendor/github.com/gedge/graphson/deserialize.go new file mode 100644 index 00000000..f0b96aed --- /dev/null +++ b/vendor/github.com/gedge/graphson/deserialize.go @@ -0,0 +1,246 @@ +package graphson + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" +) + +func DeserializeVertices(rawResponse string) ([]Vertex, error) { + // TODO: empty strings for property values will cause invalid json + // make so it can handle that case + if len(rawResponse) == 0 { + return []Vertex{}, nil + } + return DeserializeVerticesFromBytes([]byte(rawResponse)) +} + +func DeserializeVerticesFromBytes(rawResponse []byte) ([]Vertex, error) { + // TODO: empty strings for property values will cause invalid json + // make so it can handle that case + var response []Vertex + if len(rawResponse) == 0 { + return response, nil + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err := dec.Decode(&response); err != nil { + return nil, err + } + return response, nil +} + +func DeserializeListOfVerticesFromBytes(rawResponse []byte) ([]Vertex, error) { + var metaResponse ListVertices + var response []Vertex + if 
len(rawResponse) == 0 { + return response, nil + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err := dec.Decode(&metaResponse); err != nil { + return nil, err + } + + if metaResponse.Type != "g:List" { + return response, errors.New("DeserializeListOfVerticesFromBytes: Expected `g:List` type") + } + + return metaResponse.Value, nil +} + +func DeserializeListOfEdgesFromBytes(rawResponse []byte) (Edges, error) { + var metaResponse ListEdges + var response Edges + if len(rawResponse) == 0 { + return response, nil + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + err := dec.Decode(&metaResponse) + if err != nil { + return nil, err + } + + if metaResponse.Type != "g:List" { + return response, errors.New("DeserializeListOfEdgesFromBytes: Expected `g:List` type") + } + + return metaResponse.Value, nil +} + +func DeserializeMapFromBytes(rawResponse []byte) (resMap map[string]interface{}, err error) { + var metaResponse GList + if len(rawResponse) == 0 { + return + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err = dec.Decode(&metaResponse); err != nil { + return nil, err + } + + if metaResponse.Type != "g:Map" { + return resMap, errors.New("DeserializeMapFromBytes: Expected `g:Map` type") + } + + return resMap, nil +} + +// DeserializePropertiesFromBytes is for converting vertex .properties() results into a map +func DeserializePropertiesFromBytes(rawResponse []byte, resMap map[string][]interface{}) (err error) { + var metaResponse GList + if len(rawResponse) == 0 { + return + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err = dec.Decode(&metaResponse); err != nil { + return + } + + if metaResponse.Type != "g:List" { + return errors.New("DeserializePropertiesFromBytes: Expected `g:List` type") + } + var props []VertexProperty + if err = json.Unmarshal(metaResponse.Value, &props); err != nil { + 
return + } + + for _, prop := range props { + if _, ok := resMap[prop.Value.Label]; !ok { + resMap[prop.Value.Label] = []interface{}{prop.Value.Value} + } else { + resMap[prop.Value.Label] = append(resMap[prop.Value.Label], prop.Value.Value) + } + } + + return +} + +// DeserializeStringListFromBytes get a g:List value which should be a a list of strings, return those +func DeserializeStringListFromBytes(rawResponse []byte) (vals []string, err error) { + var metaResponse GList + if len(rawResponse) == 0 { + err = errors.New("DeserializeStringListFromBytes: nothing to decode") + return + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err = dec.Decode(&metaResponse); err != nil { + return + } + + if metaResponse.Type != "g:List" { + err = errors.New("DeserializeStringListFromBytes: Expected `g:List` type") + return + } + + if err = json.Unmarshal(metaResponse.Value, &vals); err != nil { + return + } + return +} + +// DeserializeSingleFromBytes get a g:List value which should be a singular item, returns that item +func DeserializeSingleFromBytes(rawResponse []byte) (gV GenericValue, err error) { + var metaResponse GList + if len(rawResponse) == 0 { + err = errors.New("DeserializeSingleFromBytes: nothing to decode") + return + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err = dec.Decode(&metaResponse); err != nil { + return + } + + if metaResponse.Type != "g:List" { + err = errors.New("DeserializeSingleFromBytes: Expected `g:List` type") + return + } + + var genVals GenericValues + if genVals, err = DeserializeGenericValues(string(metaResponse.Value)); err != nil { + return + } + + if len(genVals) != 1 { + err = fmt.Errorf("DeserializeSingleFromBytes: Expected single value, got %d", len(genVals)) + return + } + + return genVals[0], nil +} + +// DeserializeNumber returns the count from the g:List'd database response +func DeserializeNumber(rawResponse []byte) (count int64, err 
error) { + var genVal GenericValue + if genVal, err = DeserializeSingleFromBytes(rawResponse); err != nil { + return + } + + if genVal.Type != "g:Int64" { + err = errors.New("DeserializeNumber: Expected `g:Int64` type") + return + } + count = int64(genVal.Value.(float64)) + return +} + +func DeserializeEdges(rawResponse string) (Edges, error) { + var response Edges + if rawResponse == "" { + return response, nil + } + err := json.Unmarshal([]byte(rawResponse), &response) + if err != nil { + return nil, err + } + return response, nil +} + +func DeserializeGenericValue(rawResponse string) (response GenericValue, err error) { + if len(rawResponse) == 0 { + return + } + if err = json.Unmarshal([]byte(rawResponse), &response); err != nil { + return + } + return +} + +func DeserializeGenericValues(rawResponse string) (GenericValues, error) { + var response GenericValues + if rawResponse == "" { + return response, nil + } + err := json.Unmarshal([]byte(rawResponse), &response) + if err != nil { + return nil, err + } + return response, nil +} + +func ConvertToCleanVertices(vertices []Vertex) []CleanVertex { + var responseVertices []CleanVertex + for _, vertex := range vertices { + responseVertices = append(responseVertices, CleanVertex{ + Id: vertex.Value.ID, + Label: vertex.Value.Label, + }) + } + return responseVertices +} + +func ConvertToCleanEdges(edges Edges) []CleanEdge { + var responseEdges []CleanEdge + for _, edge := range edges { + responseEdges = append(responseEdges, CleanEdge{ + Source: edge.Value.InV, + Target: edge.Value.OutV, + }) + } + return responseEdges +} diff --git a/vendor/github.com/gedge/graphson/types.go b/vendor/github.com/gedge/graphson/types.go new file mode 100644 index 00000000..028027eb --- /dev/null +++ b/vendor/github.com/gedge/graphson/types.go @@ -0,0 +1,153 @@ +package graphson + +import "encoding/json" + +// cbi made up, not a real graphson or gremlin thing +// type GremlinResponse struct { +// V Vertices +// E 
Edges +// } + +type GList struct { + Type string `json:"@type"` + Value json.RawMessage `json:"@value"` +} + +// type GMap struct { +// Type string `json:"@type"` +// Value json.RawMessage `json:"@value"` +// } + +type ListVertices struct { + Type string `json:"@type"` + Value []Vertex `json:"@value"` +} +type ListEdges struct { + Type string `json:"@type"` + Value Edges `json:"@value"` +} + +// type Vertices []Vertex + +type Vertex struct { + Type string `json:"@type"` + Value VertexValue `json:"@value"` +} + +type VertexValue struct { + ID string `json:"id"` + Label string `json:"label"` + Properties map[string][]VertexProperty `json:"properties"` +} + +type VertexProperty struct { + Type string `json:"@type"` + Value VertexPropertyValue `json:"@value"` +} + +type EdgeProperty struct { + Type string `json:"@type"` + Value EdgePropertyValue `json:"@value"` +} + +type VertexPropertyValue struct { + ID GenericValue `json:"id"` + Label string `json:"label"` + Value interface{} `json:"value"` +} + +type EdgePropertyValue struct { + Label string `json:"key"` + // Value GenericValue `json:"value"` // this works when value is NOT a string + Value json.RawMessage `json:"value"` + // ValueStr string `json:"value"` + // Value interface{} `json:"value"` +} + +type GenericValues []GenericValue + +type GenericValue struct { + Type string `json:"@type"` + Value interface{} `json:"@value"` +} + +type Edges []Edge + +type Edge struct { + Type string `json:"@type"` + Value EdgeValue `json:"@value"` +} + +type EdgeValue struct { + ID string `json:"id"` + Label string `json:"label"` + InVLabel string `json:"inVLabel"` + OutVLabel string `json:"outVLabel"` + InV string `json:"inV"` + OutV string `json:"outV"` + Properties map[string]EdgeProperty `json:"properties"` +} + +// type CleanResponse struct { +// V []CleanVertex +// E []CleanEdge +// } + +type CleanEdge struct { + Source string `json:"source"` + Target string `json:"target"` +} + +type CleanVertex struct { + Id string 
`json:"id"` + Label string `json:"label"` +} + +// type MinVertex struct { +// ID string +// Label string +// Props map[string][]MinVertexProp +// } +// type MinVertexProp struct { +// // ID string +// Label string +// Value interface{} +// } + +// type UpsertVertexMap struct { +// Id string `json:""` +// Label string `json:"label"` +// } + +// type TypeID int + +// const ( +// TypeString TypeID = iota +// TypeBoolean +// TypeMap +// TypeCollection +// TypeClass +// TypeDate +// TypeDouble +// TypeFloat +// TypeInteger +// TypeLong +// TypeTimestamp +// TypeUUID +// TypeVertex +// TypeVertexProperty +// ) + +// const ( +// TypeStrDate = "g:Date" +// TypeStrDouble = "g:Double" +// TypeStrFloat = "g:Float" +// TypeStrInteger = "g:Int32" +// TypeStrLong = "g:Int64" +// TypeStrTimestamp = "g:Timestamp" +// TypeStrUUID = "g:UUID" +// TypeStrVertex = "g:Vertex" +// TypeStrVertexProperty = "g:VertexProperty" +// TypeStrProperty = "g:Property" +// TypeStrEdge = "g:Edge" +// ) diff --git a/vendor/github.com/gedge/graphson/utils.go b/vendor/github.com/gedge/graphson/utils.go new file mode 100644 index 00000000..29243048 --- /dev/null +++ b/vendor/github.com/gedge/graphson/utils.go @@ -0,0 +1,238 @@ +package graphson + +import ( + "errors" + "strings" +) + +var ( + ErrorPropertyNotFound = errors.New("property not found") + ErrorPropertyIsMeta = errors.New("meta-property found where multi-property expected") + ErrorPropertyIsMulti = errors.New("multi-property found where singleton expected") + ErrorUnexpectedPropertyType = errors.New("property value could not be cast into expected type") +) + +// GetID returns the string ID for the given vertex +func (v Vertex) GetID() string { + return v.Value.ID +} + +// GetLabels returns the []string labels for the given vertex +func (v Vertex) GetLabels() (labels []string) { + labels = append(labels, v.Value.Label) + if strings.Index(labels[0], "::") == -1 { + return + } + return strings.Split(labels[0], "::") +} + 
+// GetLabel returns the string label for the given vertex, or an error if >1 +func (v Vertex) GetLabel() (string, error) { + labels := v.GetLabels() + if len(labels) > 1 { + return "", errors.New("too many labels - expected one") + } + return labels[0], nil +} + +// GetMultiProperty returns the ([]string) values for the given property `key` +// will return an error if the property is not the correct type +func (v Vertex) GetMultiProperty(key string) (vals []string, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "string"); err != nil { + return + } + for _, val := range valsInterface { + vals = append(vals, val.(string)) + } + return +} + +// GetMultiPropertyBool returns the ([]bool) values for the given property `key` +// will return an error if the property is not the correct type +func (v Vertex) GetMultiPropertyBool(key string) (vals []bool, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "bool"); err != nil { + return + } + for _, val := range valsInterface { + vals = append(vals, val.(bool)) + } + return +} + +// GetMultiPropertyInt64 returns the ([]int64) values for the given property `key` +// will return an error if the property is not the correct type +func (v Vertex) GetMultiPropertyInt64(key string) (vals []int64, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "int64"); err != nil { + return + } + for _, val := range valsInterface { + vals = append(vals, val.(int64)) + } + return +} + +// GetMultiPropertyInt32 returns the ([]int32) values for the given property `key` +// will return an error if the property is not the correct type +func (v Vertex) GetMultiPropertyInt32(key string) (vals []int32, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "int32"); err != nil { + return + } + for _, val := range valsInterface { + vals = append(vals, val.(int32)) + 
} + return +} + +// getMultiPropertyAs returns the values for the given property `key` as type `wantType` +// will return an error if the property is not a set of the given `wantType` (string, bool, int64) +func (v Vertex) GetMultiPropertyAs(key, wantType string) (vals []interface{}, err error) { + var valInterface []VertexProperty + var ok bool + if valInterface, ok = v.Value.Properties[key]; !ok { + err = ErrorPropertyNotFound + return + } + for _, prop := range valInterface { + if prop.Value.Label != key { + err = ErrorPropertyIsMulti + return + } + switch wantType { + + case "string": + var val string + if val, ok = prop.Value.Value.(string); !ok { + err = ErrorUnexpectedPropertyType + return + } + vals = append(vals, val) + case "bool": + var val bool + if val, ok = prop.Value.Value.(bool); !ok { + err = ErrorUnexpectedPropertyType + return + } + vals = append(vals, val) + case "int32": + var typeIf, valIf interface{} + if typeIf, ok = prop.Value.Value.(map[string]interface{})["@type"]; !ok || typeIf != "g:Int32" { + return vals, ErrorUnexpectedPropertyType + } + if valIf, ok = prop.Value.Value.(map[string]interface{})["@value"]; !ok { + return vals, ErrorUnexpectedPropertyType + } + var val float64 + if val, ok = valIf.(float64); !ok { + return vals, ErrorUnexpectedPropertyType + } + vals = append(vals, int32(val)) + case "int64": + var val int64 + if val, ok = prop.Value.Value.(int64); !ok { + err = ErrorUnexpectedPropertyType + return + } + vals = append(vals, val) + } + } + return +} + +// GetProperty returns the single string value for a given property `key` +// will return an error if the property is not a single string +func (v Vertex) GetProperty(key string) (val string, err error) { + var vals []string + if vals, err = v.GetMultiProperty(key); err != nil { + return + } + if len(vals) == 0 { + err = ErrorPropertyNotFound + return + } + if len(vals) > 1 { + err = ErrorPropertyIsMulti + return + } + return vals[0], nil +} + +// GetPropertyInt64 returns 
the single int64 value for a given property `key` +// will return an error if the property is not a single string +func (v Vertex) GetPropertyInt64(key string) (val int64, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "int64"); err != nil { + return + } + if len(valsInterface) == 0 { + err = ErrorPropertyNotFound + return + } + if len(valsInterface) > 1 { + err = ErrorPropertyIsMulti + return + } + return valsInterface[0].(int64), nil +} + +// GetPropertyInt32 returns the single int32 value for a given property `key` +// will return an error if the property is not a single string +func (v Vertex) GetPropertyInt32(key string) (val int32, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "int32"); err != nil { + return + } + if len(valsInterface) == 0 { + err = ErrorPropertyNotFound + return + } + if len(valsInterface) > 1 { + err = ErrorPropertyIsMulti + return + } + return valsInterface[0].(int32), nil +} + +// GetPropertyBool returns the single bool value for a given property `key` +// will return an error if the property is not a single string +func (v Vertex) GetPropertyBool(key string) (val bool, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "bool"); err != nil { + return + } + if len(valsInterface) == 0 { + err = ErrorPropertyNotFound + return + } + if len(valsInterface) > 1 { + err = ErrorPropertyIsMulti + return + } + return valsInterface[0].(bool), nil +} + +// GetMetaProperty returns a map[string]string for the given property `key` +func (v Vertex) GetMetaProperty(key string) (metaMap map[string][]string, err error) { + var valInterface []VertexProperty + var ok bool + if valInterface, ok = v.Value.Properties[key]; !ok { + err = ErrorPropertyNotFound + return + } + for _, prop := range valInterface { + subKey := prop.Value.Label + var subVal string + if subVal, ok = prop.Value.Value.(string); !ok { + err 
= ErrorUnexpectedPropertyType + return + } + if metaMap == nil { + metaMap = make(map[string][]string) + } + metaMap[subKey] = append(metaMap[subKey], subVal) + } + return +} diff --git a/vendor/github.com/gedge/graphson/validation_utils.go b/vendor/github.com/gedge/graphson/validation_utils.go new file mode 100644 index 00000000..39d190c4 --- /dev/null +++ b/vendor/github.com/gedge/graphson/validation_utils.go @@ -0,0 +1,94 @@ +package graphson + +import ( + "fmt" +) + +func EdgesMatch(edge1, edge2 Edge) (bool, string) { + if edge1.Type != edge2.Type { + return false, "type" + } + // if ok, reason := GenericValuesMatch(edge1.Value.ID, edge2.Value.ID); !ok { + if edge1.Value.ID != edge2.Value.ID { + return false, "id" // + reason + } + if edge1.Value.Label != edge2.Value.Label { + return false, "label" + } + // if ok, reason := GenericValuesMatch(edge1.Value.InV, edge2.Value.InV); !ok { + if edge1.Value.InV != edge2.Value.InV { + return false, "inv" // + reason + } + if edge1.Value.InVLabel != edge2.Value.InVLabel { + return false, "invlabel" + } + // if ok, reason := GenericValuesMatch(edge1.Value.OutV, edge2.Value.OutV); !ok { + if edge1.Value.OutV != edge2.Value.OutV { + return false, "outv" // + reason + } + if edge1.Value.OutVLabel != edge2.Value.OutVLabel { + return false, "outvlabel" + } + if len(edge1.Value.Properties) != len(edge2.Value.Properties) { + return false, "properties" + } + for label, edge1Props := range edge1.Value.Properties { + edge2Props := edge2.Value.Properties[label] + if edge1Props.Type != edge2Props.Type { + return false, "prop.type" + } + if edge1Props.Value.Label != edge2Props.Value.Label || + fmt.Sprintf("%v", edge1Props.Value.Label) != fmt.Sprintf("%v", edge2Props.Value.Label) { + return false, "prop.value" + } + } + return true, "" +} + +func VerticesMatch(vertex1, vertex2 Vertex) bool { + if vertex1.Type != vertex2.Type { + return false + } + if vertex1.Value.ID != vertex2.Value.ID { + return false + } + if 
vertex1.Value.Label != vertex2.Value.Label { + return false + } + if len(vertex1.Value.Properties) != len(vertex2.Value.Properties) { + return false + } + for label, vertex1Props := range vertex1.Value.Properties { + vertex2Props := vertex2.Value.Properties[label] + if len(vertex1Props) != len(vertex2Props) { + return false + + } + for i, vertex1PropsElement := range vertex1Props { + vertex2PropsElement := vertex2Props[i] + if vertex1PropsElement.Type != vertex2PropsElement.Type { + return false + } + if vertex1PropsElement.Value.ID.Type != vertex2PropsElement.Value.ID.Type || + fmt.Sprintf("%v", vertex1PropsElement.Value.ID.Value) != fmt.Sprintf("%v", vertex2PropsElement.Value.ID.Value) { + return false + } + if vertex1PropsElement.Value.Label != vertex2PropsElement.Value.Label { + return false + } + if fmt.Sprintf("%v", vertex1PropsElement.Value.Value) != fmt.Sprintf("%v", vertex2PropsElement.Value.Value) { + return false + } + } + } + return true +} + +func GenericValuesMatch(gv1, gv2 GenericValue) (bool, string) { + if gv1.Type != gv2.Type { + return false, "type" + } + gv1ValueString := fmt.Sprintf("%v", gv1.Value) + gv2ValueString := fmt.Sprintf("%v", gv2.Value) + return gv1ValueString == gv2ValueString, "value" +} diff --git a/vendor/github.com/gofrs/uuid/LICENSE b/vendor/github.com/gofrs/uuid/LICENSE new file mode 100644 index 00000000..926d5498 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013-2018 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this 
permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md new file mode 100644 index 00000000..efc3204f --- /dev/null +++ b/vendor/github.com/gofrs/uuid/README.md @@ -0,0 +1,109 @@ +# UUID + +[![License](https://img.shields.io/github/license/gofrs/uuid.svg)](https://github.com/gofrs/uuid/blob/master/LICENSE) +[![Build Status](https://travis-ci.org/gofrs/uuid.svg?branch=master)](https://travis-ci.org/gofrs/uuid) +[![GoDoc](http://godoc.org/github.com/gofrs/uuid?status.svg)](http://godoc.org/github.com/gofrs/uuid) +[![Coverage Status](https://codecov.io/gh/gofrs/uuid/branch/master/graphs/badge.svg?branch=master)](https://codecov.io/gh/gofrs/uuid/) +[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/uuid)](https://goreportcard.com/report/github.com/gofrs/uuid) + +Package uuid provides a pure Go implementation of Universally Unique Identifiers +(UUID) variant as defined in RFC-4122. This package supports both the creation +and parsing of UUIDs in different formats. 
+ +This package supports the following UUID versions: +* Version 1, based on timestamp and MAC address (RFC-4122) +* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) +* Version 3, based on MD5 hashing of a named value (RFC-4122) +* Version 4, based on random numbers (RFC-4122) +* Version 5, based on SHA-1 hashing of a named value (RFC-4122) + +## Project History + +This project was originally forked from the +[github.com/satori/go.uuid](https://github.com/satori/go.uuid) repository after +it appeared to be no longer maintained, while exhibiting [critical +flaws](https://github.com/satori/go.uuid/issues/73). We have decided to take +over this project to ensure it receives regular maintenance for the benefit of +the larger Go community. + +We'd like to thank Maxim Bublis for his hard work on the original iteration of +the package. + +## License + +This source code of this package is released under the MIT License. Please see +the [LICENSE](https://github.com/gofrs/uuid/blob/master/LICENSE) for the full +content of the license. + +## Recommended Package Version + +We recommend using v2.0.0+ of this package, as versions prior to 2.0.0 were +created before our fork of the original package and have some known +deficiencies. + +## Installation + +It is recommended to use a package manager like `dep` that understands tagged +releases of a package, as well as semantic versioning. + +If you are unable to make use of a dependency manager with your project, you can +use the `go get` command to download it directly: + +```Shell +$ go get github.com/gofrs/uuid +``` + +## Requirements + +Due to subtests not being supported in older versions of Go, this package is +only regularly tested against Go 1.7+. This package may work perfectly fine with +Go 1.2+, but support for these older versions is not actively maintained. 
+ +## Go 1.11 Modules + +As of v3.2.0, this repository no longer adopts Go modules, and v3.2.0 no longer has a `go.mod` file. As a result, v3.2.0 also drops support for the `github.com/gofrs/uuid/v3` import path. Only module-based consumers are impacted. With the v3.2.0 release, _all_ gofrs/uuid consumers should use the `github.com/gofrs/uuid` import path. + +An existing module-based consumer will continue to be able to build using the `github.com/gofrs/uuid/v3` import path using any valid consumer `go.mod` that worked prior to the publishing of v3.2.0, but any module-based consumer should start using the `github.com/gofrs/uuid` import path when possible and _must_ use the `github.com/gofrs/uuid` import path prior to upgrading to v3.2.0. + +Please refer to [Issue #61](https://github.com/gofrs/uuid/issues/61) and [Issue #66](https://github.com/gofrs/uuid/issues/66) for more details. + +## Usage + +Here is a quick overview of how to use this package. For more detailed +documentation, please see the [GoDoc Page](http://godoc.org/github.com/gofrs/uuid). + +```go +package main + +import ( + "log" + + "github.com/gofrs/uuid" +) + +// Create a Version 4 UUID, panicking on error. +// Use this form to initialize package-level variables. +var u1 = uuid.Must(uuid.NewV4()) + +func main() { + // Create a Version 4 UUID. + u2, err := uuid.NewV4() + if err != nil { + log.Fatalf("failed to generate UUID: %v", err) + } + log.Printf("generated Version 4 UUID %v", u2) + + // Parse a UUID from a string. 
+ s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + u3, err := uuid.FromString(s) + if err != nil { + log.Fatalf("failed to parse UUID %q: %v", s, err) + } + log.Printf("successfully parsed UUID %v", u3) +} +``` + +## References + +* [RFC-4122](https://tools.ietf.org/html/rfc4122) +* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) diff --git a/vendor/github.com/gofrs/uuid/codec.go b/vendor/github.com/gofrs/uuid/codec.go new file mode 100644 index 00000000..e3014c68 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/codec.go @@ -0,0 +1,212 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "encoding/hex" + "fmt" +) + +// FromBytes returns a UUID generated from the raw byte slice input. +// It will return an error if the slice isn't 16 bytes long. 
+func FromBytes(input []byte) (UUID, error) { + u := UUID{} + err := u.UnmarshalBinary(input) + return u, err +} + +// FromBytesOrNil returns a UUID generated from the raw byte slice input. +// Same behavior as FromBytes(), but returns uuid.Nil instead of an error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns a UUID parsed from the input string. +// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (UUID, error) { + u := UUID{} + err := u.UnmarshalText([]byte(input)) + return u, err +} + +// FromStringOrNil returns a UUID parsed from the input string. +// Same behavior as FromString(), but returns uuid.Nil instead of an error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by the String() method. +func (u UUID) MarshalText() ([]byte, error) { + return []byte(u.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Following formats are supported: +// +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// "{6ba7b8109dad11d180b400c04fd430c8}", +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" +// +// ABNF for supported UUID text representation follows: +// +// URN := 'urn' +// UUID-NID := 'uuid' +// +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +// +// hexoct := hexdig hexdig +// 2hexoct := hexoct hexoct +// 4hexoct := 2hexoct 2hexoct +// 6hexoct := 4hexoct 2hexoct +// 12hexoct := 6hexoct 6hexoct +// +// hashlike := 12hexoct +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// +// plain := canonical | hashlike +// uuid := canonical | hashlike | braced | urn +// +// braced := '{' plain '}' | '{' hashlike '}' +// urn := URN ':' UUID-NID ':' plain +// +func (u *UUID) UnmarshalText(text []byte) error { + switch len(text) { + case 32: + return u.decodeHashLike(text) + case 34, 38: + return u.decodeBraced(text) + case 36: + return u.decodeCanonical(text) + case 41, 45: + return u.decodeURN(text) + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(text), text) + } +} + +// decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3): +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". 
+func (u *UUID) decodeCanonical(t []byte) error { + if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + src := t + dst := u[:] + + for i, byteGroup := range byteGroups { + if i > 0 { + src = src[1:] // skip dash + } + _, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup]) + if err != nil { + return err + } + src = src[byteGroup:] + dst = dst[byteGroup/2:] + } + + return nil +} + +// decodeHashLike decodes UUID strings that are using the following format: +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeHashLike(t []byte) error { + src := t[:] + dst := u[:] + + _, err := hex.Decode(dst, src) + return err +} + +// decodeBraced decodes UUID strings that are using the following formats: +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" +// "{6ba7b8109dad11d180b400c04fd430c8}". +func (u *UUID) decodeBraced(t []byte) error { + l := len(t) + + if t[0] != '{' || t[l-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + return u.decodePlain(t[1 : l-1]) +} + +// decodeURN decodes UUID strings that are using the following formats: +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeURN(t []byte) error { + total := len(t) + + urnUUIDPrefix := t[:9] + + if !bytes.Equal(urnUUIDPrefix, urnPrefix) { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + return u.decodePlain(t[9:total]) +} + +// decodePlain decodes UUID strings that are using the following formats: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format +// "6ba7b8109dad11d180b400c04fd430c8". 
+func (u *UUID) decodePlain(t []byte) error { + switch len(t) { + case 32: + return u.decodeHashLike(t) + case 36: + return u.decodeCanonical(t) + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(t), t) + } +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (u UUID) MarshalBinary() ([]byte, error) { + return u.Bytes(), nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return an error if the slice isn't 16 bytes long. +func (u *UUID) UnmarshalBinary(data []byte) error { + if len(data) != Size { + return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + } + copy(u[:], data) + + return nil +} diff --git a/vendor/github.com/gofrs/uuid/fuzz.go b/vendor/github.com/gofrs/uuid/fuzz.go new file mode 100644 index 00000000..afaefbc8 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/fuzz.go @@ -0,0 +1,47 @@ +// Copyright (c) 2018 Andrei Tudor Călin +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// +build gofuzz + +package uuid + +// Fuzz implements a simple fuzz test for FromString / UnmarshalText. +// +// To run: +// +// $ go get github.com/dvyukov/go-fuzz/... +// $ cd $GOPATH/src/github.com/gofrs/uuid +// $ go-fuzz-build github.com/gofrs/uuid +// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata +// +// If you make significant changes to FromString / UnmarshalText and add +// new cases to fromStringTests (in codec_test.go), please run +// +// $ go test -seed_fuzz_corpus +// +// to seed the corpus with the new interesting inputs, then run the fuzzer. +func Fuzz(data []byte) int { + _, err := FromString(string(data)) + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go new file mode 100644 index 00000000..4257761f --- /dev/null +++ b/vendor/github.com/gofrs/uuid/generator.go @@ -0,0 +1,299 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "fmt" + "hash" + "io" + "net" + "os" + "sync" + "time" +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +type epochFunc func() time.Time + +// HWAddrFunc is the function type used to provide hardware (MAC) addresses. +type HWAddrFunc func() (net.HardwareAddr, error) + +// DefaultGenerator is the default UUID Generator used by this package. +var DefaultGenerator Generator = NewGen() + +var ( + posixUID = uint32(os.Getuid()) + posixGID = uint32(os.Getgid()) +) + +// NewV1 returns a UUID based on the current timestamp and MAC address. +func NewV1() (UUID, error) { + return DefaultGenerator.NewV1() +} + +// NewV2 returns a DCE Security UUID based on the POSIX UID/GID. +func NewV2(domain byte) (UUID, error) { + return DefaultGenerator.NewV2(domain) +} + +// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + return DefaultGenerator.NewV3(ns, name) +} + +// NewV4 returns a randomly generated UUID. +func NewV4() (UUID, error) { + return DefaultGenerator.NewV4() +} + +// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + return DefaultGenerator.NewV5(ns, name) +} + +// Generator provides an interface for generating UUIDs. 
+type Generator interface { + NewV1() (UUID, error) + NewV2(domain byte) (UUID, error) + NewV3(ns UUID, name string) UUID + NewV4() (UUID, error) + NewV5(ns UUID, name string) UUID +} + +// Gen is a reference UUID generator based on the specifications laid out in +// RFC-4122 and DCE 1.1: Authentication and Security Services. This type +// satisfies the Generator interface as defined in this package. +// +// For consumers who are generating V1 UUIDs, but don't want to expose the MAC +// address of the node generating the UUIDs, the NewGenWithHWAF() function has been +// provided as a convenience. See the function's documentation for more info. +// +// The authors of this package do not feel that the majority of users will need +// to obfuscate their MAC address, and so we recommend using NewGen() to create +// a new generator. +type Gen struct { + clockSequenceOnce sync.Once + hardwareAddrOnce sync.Once + storageMutex sync.Mutex + + rand io.Reader + + epochFunc epochFunc + hwAddrFunc HWAddrFunc + lastTime uint64 + clockSequence uint16 + hardwareAddr [6]byte +} + +// interface check -- build will fail if *Gen doesn't satisfy Generator +var _ Generator = (*Gen)(nil) + +// NewGen returns a new instance of Gen with some default values set. Most +// people should use this. +func NewGen() *Gen { + return NewGenWithHWAF(defaultHWAddrFunc) +} + +// NewGenWithHWAF builds a new UUID generator with the HWAddrFunc provided. Most +// consumers should use NewGen() instead. +// +// This is used so that consumers can generate their own MAC addresses, for use +// in the generated UUIDs, if there is some concern about exposing the physical +// address of the machine generating the UUID. +// +// The Gen generator will only invoke the HWAddrFunc once, and cache that MAC +// address for all the future UUIDs generated by it. If you'd like to switch the +// MAC address being used, you'll need to create a new generator using this +// function. 
+func NewGenWithHWAF(hwaf HWAddrFunc) *Gen { + return &Gen{ + epochFunc: time.Now, + hwAddrFunc: hwaf, + rand: rand.Reader, + } +} + +// NewV1 returns a UUID based on the current timestamp and MAC address. +func (g *Gen) NewV1() (UUID, error) { + u := UUID{} + + timeNow, clockSeq, err := g.getClockSequence() + if err != nil { + return Nil, err + } + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + hardwareAddr, err := g.getHardwareAddr() + if err != nil { + return Nil, err + } + copy(u[10:], hardwareAddr) + + u.SetVersion(V1) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV2 returns a DCE Security UUID based on the POSIX UID/GID. +func (g *Gen) NewV2(domain byte) (UUID, error) { + u, err := g.NewV1() + if err != nil { + return Nil, err + } + + switch domain { + case DomainPerson: + binary.BigEndian.PutUint32(u[:], posixUID) + case DomainGroup: + binary.BigEndian.PutUint32(u[:], posixGID) + } + + u[9] = domain + + u.SetVersion(V2) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. +func (g *Gen) NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(V3) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV4 returns a randomly generated UUID. +func (g *Gen) NewV4() (UUID, error) { + u := UUID{} + if _, err := io.ReadFull(g.rand, u[:]); err != nil { + return Nil, err + } + u.SetVersion(V4) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. +func (g *Gen) NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(V5) + u.SetVariant(VariantRFC4122) + + return u +} + +// Returns the epoch and clock sequence. 
+func (g *Gen) getClockSequence() (uint64, uint16, error) { + var err error + g.clockSequenceOnce.Do(func() { + buf := make([]byte, 2) + if _, err = io.ReadFull(g.rand, buf); err != nil { + return + } + g.clockSequence = binary.BigEndian.Uint16(buf) + }) + if err != nil { + return 0, 0, err + } + + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + timeNow := g.getEpoch() + // Clock didn't change since last UUID generation. + // Should increase clock sequence. + if timeNow <= g.lastTime { + g.clockSequence++ + } + g.lastTime = timeNow + + return timeNow, g.clockSequence, nil +} + +// Returns the hardware address. +func (g *Gen) getHardwareAddr() ([]byte, error) { + var err error + g.hardwareAddrOnce.Do(func() { + var hwAddr net.HardwareAddr + if hwAddr, err = g.hwAddrFunc(); err == nil { + copy(g.hardwareAddr[:], hwAddr) + return + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence. + if _, err = io.ReadFull(g.rand, g.hardwareAddr[:]); err != nil { + return + } + // Set multicast bit as recommended by RFC-4122 + g.hardwareAddr[0] |= 0x01 + }) + if err != nil { + return []byte{}, err + } + return g.hardwareAddr[:], nil +} + +// Returns the difference between UUID epoch (October 15, 1582) +// and current time in 100-nanosecond intervals. +func (g *Gen) getEpoch() uint64 { + return epochStart + uint64(g.epochFunc().UnixNano()/100) +} + +// Returns the UUID based on the hashing of the namespace UUID and name. +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +// Returns the hardware address. 
+func defaultHWAddrFunc() (net.HardwareAddr, error) { + ifaces, err := net.Interfaces() + if err != nil { + return []byte{}, err + } + for _, iface := range ifaces { + if len(iface.HardwareAddr) >= 6 { + return iface.HardwareAddr, nil + } + } + return []byte{}, fmt.Errorf("uuid: no HW address found") +} diff --git a/vendor/github.com/gofrs/uuid/sql.go b/vendor/github.com/gofrs/uuid/sql.go new file mode 100644 index 00000000..6f254a4f --- /dev/null +++ b/vendor/github.com/gofrs/uuid/sql.go @@ -0,0 +1,109 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. 
+// A 16-byte slice will be handled by UnmarshalBinary, while +// a longer byte slice or a string will be handled by UnmarshalText. +func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case UUID: // support gorm convert from UUID to NullUUID + *u = src + return nil + + case []byte: + if len(src) == Size { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database. +type NullUUID struct { + UUID UUID + Valid bool +} + +// Value implements the driver.Valuer interface. +func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. +func (u *NullUUID) Scan(src interface{}) error { + if src == nil { + u.UUID, u.Valid = Nil, false + return nil + } + + // Delegate to UUID Scan function + u.Valid = true + return u.UUID.Scan(src) +} + +// MarshalJSON marshals the NullUUID as null or the nested UUID +func (u NullUUID) MarshalJSON() ([]byte, error) { + if !u.Valid { + return json.Marshal(nil) + } + + return json.Marshal(u.UUID) +} + +// UnmarshalJSON unmarshals a NullUUID +func (u *NullUUID) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, []byte("null")) { + u.UUID, u.Valid = Nil, false + return nil + } + + if err := json.Unmarshal(b, &u.UUID); err != nil { + return err + } + + u.Valid = true + + return nil +} diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go new file mode 100644 index 00000000..9c4547f1 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/uuid.go @@ -0,0 +1,250 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a 
copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package uuid provides implementations of the Universally Unique Identifier (UUID), as specified in RFC-4122 and DCE 1.1. +// +// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. +// +// DCE 1.1[2] provides the specification for version 2. +// +// [1] https://tools.ietf.org/html/rfc4122 +// [2] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 +package uuid + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "strings" + "time" +) + +// Size of a UUID in bytes. +const Size = 16 + +// UUID is an array type to represent the value of a UUID, as defined in RFC-4122. +type UUID [Size]byte + +// UUID versions. +const ( + _ byte = iota + V1 // Version 1 (date-time and MAC address) + V2 // Version 2 (date-time and MAC address, DCE security version) + V3 // Version 3 (namespace name-based) + V4 // Version 4 (random) + V5 // Version 5 (namespace name-based) +) + +// UUID layout variants. 
+const ( + VariantNCS byte = iota + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// UUID DCE domains. +const ( + DomainPerson = iota + DomainGroup + DomainOrg +) + +// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00, +// 15 October 1582 within a V1 UUID. This type has no meaning for V2-V5 +// UUIDs since they don't have an embedded timestamp. +type Timestamp uint64 + +const _100nsPerSecond = 10000000 + +// Time returns the UTC time.Time representation of a Timestamp +func (t Timestamp) Time() (time.Time, error) { + secs := uint64(t) / _100nsPerSecond + nsecs := 100 * (uint64(t) % _100nsPerSecond) + return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil +} + +// TimestampFromV1 returns the Timestamp embedded within a V1 UUID. +// Returns an error if the UUID is any version other than 1. +func TimestampFromV1(u UUID) (Timestamp, error) { + if u.Version() != 1 { + err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version()) + return 0, err + } + low := binary.BigEndian.Uint32(u[0:4]) + mid := binary.BigEndian.Uint16(u[4:6]) + hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff + return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil +} + +// String parse helpers. +var ( + urnPrefix = []byte("urn:uuid:") + byteGroups = []int{8, 4, 4, 4, 12} +) + +// Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to +// zero. +var Nil = UUID{} + +// Predefined namespace UUIDs. +var ( + NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) +) + +// Version returns the algorithm version used to generate the UUID. +func (u UUID) Version() byte { + return u[6] >> 4 +} + +// Variant returns the UUID layout variant. 
+func (u UUID) Variant() byte { + switch { + case (u[8] >> 7) == 0x00: + return VariantNCS + case (u[8] >> 6) == 0x02: + return VariantRFC4122 + case (u[8] >> 5) == 0x06: + return VariantMicrosoft + case (u[8] >> 5) == 0x07: + fallthrough + default: + return VariantFuture + } +} + +// Bytes returns a byte slice representation of the UUID. +func (u UUID) Bytes() []byte { + return u[:] +} + +// String returns a canonical RFC-4122 string representation of the UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// Format implements fmt.Formatter for UUID values. +// +// The behavior is as follows: +// The 'x' and 'X' verbs output only the hex digits of the UUID, using a-f for 'x' and A-F for 'X'. +// The 'v', '+v', 's' and 'q' verbs return the canonical RFC-4122 string representation. +// The 'S' verb returns the RFC-4122 format, but with capital hex digits. +// The '#v' verb returns the "Go syntax" representation, which is a 16 byte array initializer. +// All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return +// "%!verb(uuid.UUID=value)" as recommended by the fmt package. 
+func (u UUID) Format(f fmt.State, c rune) { + switch c { + case 'x', 'X': + s := hex.EncodeToString(u.Bytes()) + if c == 'X' { + s = strings.Map(toCapitalHexDigits, s) + } + _, _ = io.WriteString(f, s) + case 'v': + var s string + if f.Flag('#') { + s = fmt.Sprintf("%#v", [Size]byte(u)) + } else { + s = u.String() + } + _, _ = io.WriteString(f, s) + case 's', 'S': + s := u.String() + if c == 'S' { + s = strings.Map(toCapitalHexDigits, s) + } + _, _ = io.WriteString(f, s) + case 'q': + _, _ = io.WriteString(f, `"`+u.String()+`"`) + default: + // invalid/unsupported format verb + fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String()) + } +} + +func toCapitalHexDigits(ch rune) rune { + // convert a-f hex digits to A-F + switch ch { + case 'a': + return 'A' + case 'b': + return 'B' + case 'c': + return 'C' + case 'd': + return 'D' + case 'e': + return 'E' + case 'f': + return 'F' + default: + return ch + } +} + +// SetVersion sets the version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets the variant bits. +func (u *UUID) SetVariant(v byte) { + switch v { + case VariantNCS: + u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) + case VariantRFC4122: + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + case VariantMicrosoft: + u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) + case VariantFuture: + fallthrough + default: + u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) + } +} + +// Must is a helper that wraps a call to a function returning (UUID, error) +// and panics if the error is non-nil. 
It is intended for use in variable +// initializations such as +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) +func Must(u UUID, err error) UUID { + if err != nil { + panic(err) + } + return u +} diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 00000000..1931f400 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 00000000..9171c972 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 00000000..f26fd466 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,64 @@ +# Gorilla WebSocket + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) + +### Documentation + +* [API Reference](http://godoc.org/github.com/gorilla/websocket) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. 
+ +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + +### Gorilla WebSocket compared with other packages + + + + + + + + + + + + + + + + + + +
github.com/gorillagolang.org/x/net
RFC 6455 Features
Passes Autobahn Test SuiteYesNo
Receive fragmented messageYesNo, see note 1
Send close messageYesNo
Send pings and receive pongsYesNo
Get the type of a received data messageYesYes, see note 2
Other Features
Compression ExtensionsExperimentalNo
Read message using io.ReaderYesNo, see note 3
Write message using io.WriteCloserYesNo, see note 3
+ +Notes: + +1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). +2. The application can get the type of a received data message by implementing + a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) + function. +3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. + Read returns when the input buffer is full or a frame boundary is + encountered. Each call to Write sends a single frame message. The Gorilla + io.Reader and io.WriteCloser operate on a single WebSocket message. + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 00000000..962c06a3 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,395 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). 
+// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, net.DialContext is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. 
If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. 
Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer. +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. 
+ req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. + var netDial func(network, add string) (net.Conn, error) + + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } else { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. 
+ if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. + if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if u.Scheme == "https" { + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + var err error + if trace != nil { + err = doHandshakeWithTrace(trace, tlsConn, cfg) + } else { + err = doHandshake(tlsConn, cfg) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if 
resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} + +func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go new file mode 100644 index 00000000..4f0d9437 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.8 + +package websocket + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go new file mode 100644 index 00000000..babb007f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +import "crypto/tls" + +// cloneTLSConfig clones all public fields except the fields +// SessionTicketsDisabled and SessionTicketKey. This avoids copying the +// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a +// config in active use. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 00000000..813ffb1e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. 
+ if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 00000000..9971ea36 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1163 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. 
+var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) + case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. 
+func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + 
CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan bool // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + readRemaining int64 // bytes remaining in current frame. + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. 
+ readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan bool, 1) + mu <- true + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
// RemoteAddr returns the remote network address.
func (c *Conn) RemoteAddr() net.Addr {
	return c.conn.RemoteAddr()
}

// Write methods

// writeFatal records err as the connection's permanent write error (the
// first fatal error wins) and returns the hidden-temporary form of err.
// All subsequent writes fail with the stored error.
func (c *Conn) writeFatal(err error) error {
	err = hideTempErr(err)
	c.writeErrMu.Lock()
	if c.writeErr == nil {
		c.writeErr = err
	}
	c.writeErrMu.Unlock()
	return err
}

// read consumes and returns the next n bytes from the buffered reader.
// The returned slice aliases the bufio internal buffer and is only valid
// until the next read on c.br. An io.EOF mid-frame is reported as an
// abnormal-closure CloseError (errUnexpectedEOF).
func (c *Conn) read(n int) ([]byte, error) {
	p, err := c.br.Peek(n)
	if err == io.EOF {
		err = errUnexpectedEOF
	}
	c.br.Discard(len(p))
	return p, err
}

// write writes buf0 followed by buf1 to the connection as one frame while
// holding the write lock. c.mu is a 1-buffered channel used as a mutex:
// receiving acquires the lock, sending releases it.
func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
	<-c.mu
	defer func() { c.mu <- true }()

	c.writeErrMu.Lock()
	err := c.writeErr
	c.writeErrMu.Unlock()
	if err != nil {
		return err
	}

	c.conn.SetWriteDeadline(deadline)
	if len(buf1) == 0 {
		_, err = c.conn.Write(buf0)
	} else {
		err = c.writeBufs(buf0, buf1)
	}
	if err != nil {
		return c.writeFatal(err)
	}
	if frameType == CloseMessage {
		// After a close frame, all further writes must fail with ErrCloseSent.
		c.writeFatal(ErrCloseSent)
	}
	return nil
}

// WriteControl writes a control message with the given deadline. The allowed
// message types are CloseMessage, PingMessage and PongMessage.
func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
	if !isControl(messageType) {
		return errBadWriteOpCode
	}
	if len(data) > maxControlFramePayloadSize {
		return errInvalidControlFrame
	}

	b0 := byte(messageType) | finalBit
	b1 := byte(len(data))
	if !c.isServer {
		// Client-to-server frames must be masked (RFC 6455 section 5.3).
		b1 |= maskBit
	}

	buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
	buf = append(buf, b0, b1)

	if c.isServer {
		buf = append(buf, data...)
	} else {
		key := newMaskKey()
		buf = append(buf, key[:]...)
		buf = append(buf, data...)
		// Payload starts at offset 6: 2 header bytes + 4-byte mask key.
		maskBytes(key, 0, buf[6:])
	}

	// Zero deadline: wait effectively forever to acquire the write lock.
	d := time.Hour * 1000
	if !deadline.IsZero() {
		d = deadline.Sub(time.Now())
		if d < 0 {
			return errWriteTimeout
		}
	}

	// Acquire the write lock (c.mu channel) or time out at the deadline.
	timer := time.NewTimer(d)
	select {
	case <-c.mu:
		timer.Stop()
	case <-timer.C:
		return errWriteTimeout
	}
	defer func() { c.mu <- true }()

	c.writeErrMu.Lock()
	err := c.writeErr
	c.writeErrMu.Unlock()
	if err != nil {
		return err
	}

	c.conn.SetWriteDeadline(deadline)
	_, err = c.conn.Write(buf)
	if err != nil {
		return c.writeFatal(err)
	}
	if messageType == CloseMessage {
		c.writeFatal(ErrCloseSent)
	}
	return err
}

// beginMessage prepares a connection and message writer for a new message.
func (c *Conn) beginMessage(mw *messageWriter, messageType int) error {
	// Close previous writer if not already closed by the application. It's
	// probably better to return an error in this situation, but we cannot
	// change this without breaking existing applications.
	if c.writer != nil {
		c.writer.Close()
		c.writer = nil
	}

	if !isControl(messageType) && !isData(messageType) {
		return errBadWriteOpCode
	}

	c.writeErrMu.Lock()
	err := c.writeErr
	c.writeErrMu.Unlock()
	if err != nil {
		return err
	}

	mw.c = c
	mw.frameType = messageType
	mw.pos = maxFrameHeaderSize

	// Lazily obtain a frame buffer: reuse one from the pool when
	// configured, otherwise allocate a fresh buffer of the fixed size.
	if c.writeBuf == nil {
		wpd, ok := c.writePool.Get().(writePoolData)
		if ok {
			c.writeBuf = wpd.buf
		} else {
			c.writeBuf = make([]byte, c.writeBufSize)
		}
	}
	return nil
}

// NextWriter returns a writer for the next message to send. The writer's Close
// method flushes the complete message to the network.
//
// There can be at most one open writer on a connection. NextWriter closes the
// previous writer if the application has not already done so.
//
// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
// PongMessage) are supported.
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return nil, err + } + c.writer = &mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) endMessage(err error) error { + if w.err != nil { + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.endMessage(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. 
+ framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.endMessage(err) + } + + if final { + w.endMessage(errWriteClosed) + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
+ err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + return w.flushFrame(true, nil) +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. 
+func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return err + } + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
+ + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + final := p[0]&finalBit != 0 + frameType := int(p[0] & 0xf) + mask := p[1]&maskBit != 0 + c.readRemaining = int64(p[1] & 0x7f) + + c.readDecompress = false + if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { + c.readDecompress = true + p[0] &^= rsv1Bit + } + + if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(p)) + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(p)) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. 
+ + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.readRemaining = 0 + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("invalid close code") + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. 
NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + c.readRemaining -= int64(n) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. 
+func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. +func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. 
+// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. 
See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. + return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/conn_write.go b/vendor/github.com/gorilla/websocket/conn_write.go new file mode 100644 index 00000000..a509a21f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package websocket + +import "net" + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go new file mode 100644 index 00000000..37edaff5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write_legacy.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.8 + +package websocket + +func (c *Conn) writeBufs(bufs ...[]byte) error { + for _, buf := range bufs { + if len(buf) > 0 { + if _, err := c.conn.Write(buf); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 00000000..c6f4df89 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,227 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. 
To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. 
The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. 
If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default +// size of 4096 when a buffer size field is set to zero. The Upgrader reuses +// buffers created by the HTTP server when a buffer size field is set to zero. +// The HTTP server buffers have a size of 4096 at the time of this writing. +// +// The buffer sizes do not limit the size of a message that can be read or +// written by a connection. +// +// Buffers are held for the lifetime of the connection by default. If the +// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the +// write buffer only when writing a message. +// +// Applications should tune the buffer sizes to balance memory use and +// performance. Increasing the buffer size uses more memory, but can reduce the +// number of system calls to read or write the network. 
In the case of writing, +// increasing the buffer size can reduce the number of frame headers written to +// the network. +// +// Some guidelines for setting buffer parameters are: +// +// Limit the buffer sizes to the maximum expected message size. Buffers larger +// than the largest message do not provide any benefit. +// +// Depending on the distribution of message sizes, setting the buffer size to +// to a value less than the maximum expected message size can greatly reduce +// memory use with a small impact on performance. Here's an example: If 99% of +// the messages are smaller than 256 bytes and the maximum message size is 512 +// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls +// than a buffer size of 512 bytes. The memory savings is 50%. +// +// A write buffer pool is useful when the application has a modest number +// writes over a large number of connections. when buffers are pooled, a larger +// buffer size has a reduced impact on total memory use and has the benefit of +// reducing system calls and frame overhead. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". 
+// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/go.mod b/vendor/github.com/gorilla/websocket/go.mod new file mode 100644 index 00000000..93a9e924 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/go.mod @@ -0,0 +1 @@ +module github.com/gorilla/websocket diff --git a/vendor/github.com/gorilla/websocket/go.sum b/vendor/github.com/gorilla/websocket/go.sum new file mode 100644 index 00000000..cf4fbbaa --- /dev/null +++ b/vendor/github.com/gorilla/websocket/go.sum @@ -0,0 +1,2 @@ +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go new file mode 100644 index 00000000..c64f8c82 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/join.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "io" + "strings" +) + +// JoinMessages concatenates received messages to create a single io.Reader. +// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. 
+func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 00000000..dc2c1f64 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. 
+// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 00000000..577fce9e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,54 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. 
+ b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 00000000..2aac060e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 00000000..74ec565d --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. 
+type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
+ mu := make(chan bool, 1) + mu <- true + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 00000000..e87a8c9f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + forwardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.forwardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + conn.Close() + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. 
+ br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 00000000..3d4480a4 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,363 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. 
+ WriteBufferPool BufferPool + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is not nil, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-WebSocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not 
implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) 
+ } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. 
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} + +// bufioReaderSize size returns the size of a bufio.Reader. +func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { + // This code assumes that peek on a reset reader returns + // bufio.Reader.buf[:0]. + // TODO: Use bufio.Reader.Size() after Go 1.10 + br.Reset(originalReader) + if p, err := br.Peek(0); err == nil { + return cap(p) + } + return 0 +} + +// writeHook is an io.Writer that records the last slice passed to it vio +// io.Writer.Write. +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +// bufioWriterBuffer grabs the buffer from a bufio.Writer. 
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { + // This code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + bw.Reset(&wh) + bw.WriteByte(0) + bw.Flush() + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/trace.go b/vendor/github.com/gorilla/websocket/trace.go new file mode 100644 index 00000000..834f122a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/trace.go @@ -0,0 +1,19 @@ +// +build go1.8 + +package websocket + +import ( + "crypto/tls" + "net/http/httptrace" +) + +func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { + if trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(tlsConn, cfg) + if trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/trace_17.go b/vendor/github.com/gorilla/websocket/trace_17.go new file mode 100644 index 00000000..77d05a0b --- /dev/null +++ b/vendor/github.com/gorilla/websocket/trace_17.go @@ -0,0 +1,12 @@ +// +build !go1.8 + +package websocket + +import ( + "crypto/tls" + "net/http/httptrace" +) + +func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { + return doHandshake(tlsConn, cfg) +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 00000000..7bf2f66c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,283 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Token octets per RFC 2616. +var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +// skipSpace returns a slice of the string s with all leading RFC 2616 linear +// whitespace removed. 
+func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if b := s[i]; b != ' ' && b != '\t' { + break + } + } + return s[i:] +} + +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if !isTokenOctet[s[i]] { + break + } + } + return s[:i], s[i:] +} + +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. 
+func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. +func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 00000000..2e668f6b --- /dev/null +++ 
b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. 
+func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
+func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. 
+ if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. +func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. 
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == 
proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) 
+ } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 96528cc8..cc622fd2 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -21,64 +21,88 @@ "revisionTime": "2019-01-29T13:58:41Z" }, { - "checksumSHA1": "leH/fyl/KmShljBp5TdSsQr9xyc=", + "checksumSHA1": "4hOuaa2bRo+okR3xsWFCTeo1NNc=", "path": 
"github.com/ONSdigital/dp-graph/config", - "revision": "455120c204d33ab78d5663486630583373856e41", - "revisionTime": "2019-03-25T17:53:13Z" + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" }, { - "checksumSHA1": "UeeWNjQu6xW/Hw+v4MAfctMmIZ8=", + "checksumSHA1": "Qg+JFeVzRnx1l8wM3ZE04gLExcs=", "path": "github.com/ONSdigital/dp-graph/graph", - "revision": "455120c204d33ab78d5663486630583373856e41", - "revisionTime": "2019-03-25T17:53:13Z" + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" }, { - "checksumSHA1": "Pcuy1j/u/ZJFwwooiNS9phPqOic=", + "checksumSHA1": "263RXdCaG1tXdn57RTXlzP1TYS8=", "path": "github.com/ONSdigital/dp-graph/graph/driver", - "revision": "455120c204d33ab78d5663486630583373856e41", - "revisionTime": "2019-03-25T17:53:13Z" + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" }, { "checksumSHA1": "PyMyKOBc90I+59WEPLD6Aovpv/4=", "path": "github.com/ONSdigital/dp-graph/mock", - "revision": "455120c204d33ab78d5663486630583373856e41", - "revisionTime": "2019-03-25T17:53:13Z" + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" }, { - "checksumSHA1": "bljN5KJ5+Mijg+yvE+ZZQIuJNyw=", + "checksumSHA1": "rlvdEFCCXvGZW0R5HBM0FEEhN38=", "path": "github.com/ONSdigital/dp-graph/neo4j", - "revision": "455120c204d33ab78d5663486630583373856e41", - "revisionTime": "2019-03-25T17:53:13Z" - }, - { - "checksumSHA1": "10C6dRVYfPkcgYHMGLsJLfkPvck=", - "path": "github.com/ONSdigital/dp-graph/neo4j/driver", - "revision": "455120c204d33ab78d5663486630583373856e41", - "revisionTime": "2019-03-25T17:53:13Z" + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" }, { "checksumSHA1": "u0TDiIBM7IO/NZDXOvEwm3xbqvw=", "path": "github.com/ONSdigital/dp-graph/neo4j/mapper", - "revision": 
"455120c204d33ab78d5663486630583373856e41", - "revisionTime": "2019-03-25T17:53:13Z" + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" }, { - "checksumSHA1": "e2tM4AYGw5b9ZZjEh8eSw0yzGyA=", + "checksumSHA1": "fWNLT90j5B/ygFLfYpUB+TMoMaA=", + "path": "github.com/ONSdigital/dp-graph/neo4j/neo4jdriver", + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" + }, + { + "checksumSHA1": "QV5LGj4rOKWT5tvkZdzixj4D2OM=", "path": "github.com/ONSdigital/dp-graph/neo4j/query", - "revision": "455120c204d33ab78d5663486630583373856e41", - "revisionTime": "2019-03-25T17:53:13Z" + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" + }, + { + "checksumSHA1": "CVWm0P5TBbtKwNY6VZ4+TPvfcmw=", + "path": "github.com/ONSdigital/dp-graph/neptune", + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" + }, + { + "checksumSHA1": "Z5TQx5pNn90l8zjeSbsSgc9hqP8=", + "path": "github.com/ONSdigital/dp-graph/neptune/driver", + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" + }, + { + "checksumSHA1": "WdzbL61yx299fxN4CYuMx/rJd5w=", + "path": "github.com/ONSdigital/dp-graph/neptune/internal", + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" }, { - "checksumSHA1": "4LiEZE5u3kqCBvzigVEhXcjac2g=", + "checksumSHA1": "zLyHRijgYcow+/o4TELraqBrhiU=", + "path": "github.com/ONSdigital/dp-graph/neptune/query", + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" + }, + { + "checksumSHA1": "DRohWZAkyrLLQ5B5yq/bngMZbgM=", "path": "github.com/ONSdigital/dp-graph/observation", - "revision": "455120c204d33ab78d5663486630583373856e41", - "revisionTime": "2019-03-25T17:53:13Z" + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + 
"revisionTime": "2019-07-11T10:42:33Z" }, { - "checksumSHA1": "jWfoMEr6BOPZEKBIRTmnsRwopS0=", + "checksumSHA1": "ZVHJ3XHyBXaEFq1DN04u4+z6nP4=", "path": "github.com/ONSdigital/dp-graph/observation/observationtest", - "revision": "455120c204d33ab78d5663486630583373856e41", - "revisionTime": "2019-03-25T17:53:13Z" + "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", + "revisionTime": "2019-07-11T10:42:33Z" }, { "checksumSHA1": "sX7Krcb3Xi+QkzybgFfBj6hJ81A=", @@ -224,6 +248,12 @@ "revision": "da534111531d44d5dfcdda1b9057586bf7aac44c", "revisionTime": "2019-02-28T15:33:39Z" }, + { + "checksumSHA1": "xgjhvEt1uFeFa0DlYJQHvb/b0uk=", + "path": "github.com/ONSdigital/gremgo-neptune", + "revision": "8103e7ca8a44d85458fc10506f35bea78ecba754", + "revisionTime": "2019-07-12T13:38:19Z" + }, { "checksumSHA1": "+Jp0tVXfQ1TM8T+oun82oJtME5U=", "origin": "github.com/ONSdigital/go-ns/vendor/github.com/Shopify/sarama", @@ -266,6 +296,12 @@ "revision": "6920413b753350672215a083e0f9d5c270a21075", "revisionTime": "2017-11-28T09:28:02Z" }, + { + "checksumSHA1": "uE/knpBWtpFt1k/xZmpwSs+BhIs=", + "path": "github.com/gedge/graphson", + "revision": "d39cb8fe4384259290719c4b5693f39f0d8b85ca", + "revisionTime": "2019-05-31T09:24:26Z" + }, { "checksumSHA1": "C0PrqJwZS9A+Izfrk+YlZQJFsvg=", "path": "github.com/globalsign/mgo", @@ -303,6 +339,12 @@ "revision": "6920413b753350672215a083e0f9d5c270a21075", "revisionTime": "2017-11-28T09:28:02Z" }, + { + "checksumSHA1": "s9C2MKOjpuvhfEIfoG6U9/DdMZ4=", + "path": "github.com/gofrs/uuid", + "revision": "abfe1881e60ef34074c1b8d8c63b42565c356ed6", + "revisionTime": "2019-05-10T20:42:58Z" + }, { "checksumSHA1": "p/8vSviYF91gFflhrt5vkyksroo=", "origin": "github.com/ONSdigital/go-ns/vendor/github.com/golang/snappy", @@ -328,6 +370,12 @@ "revision": "ac112f7d75a0714af1bd86ab17749b31f7809640", "revisionTime": "2017-07-03T15:07:09Z" }, + { + "checksumSHA1": "3ORLCzeT6ci2RkN0jo1bY6bmE8A=", 
+ "path": "github.com/gorilla/websocket", + "revision": "80c2d40e9b91f2ef7a9c1a403aeec64d1b89a9a6", + "revisionTime": "2019-04-27T04:03:06Z" + }, { "checksumSHA1": "TsX+LuxHhV9GFmua8C1nxflFcTA=", "path": "github.com/johnnadratowski/golang-neo4j-bolt-driver", From 5f0fa660acb66c7d7f5c2119c23aaf009d18e96a Mon Sep 17 00:00:00 2001 From: Eleanor Deal Date: Mon, 15 Jul 2019 11:13:50 +0100 Subject: [PATCH 02/15] Fix unit tests for imported mock name change --- api/observation_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/observation_test.go b/api/observation_test.go index 06a60e66..81b08d33 100644 --- a/api/observation_test.go +++ b/api/observation_test.go @@ -52,7 +52,7 @@ func TestGetObservationsReturnsOK(t *testing.T) { usagesNotes := &[]models.UsageNote{models.UsageNote{Title: "data_marking", Note: "this marks the obsevation with a special character"}} count := 0 - mockRowReader := &observationtest.CSVRowReaderMock{ + mockRowReader := &observationtest.StreamRowReaderMock{ ReadFunc: func() (string, error) { count++ if count == 1 { @@ -160,7 +160,7 @@ func TestGetObservationsReturnsOK(t *testing.T) { usagesNotes := &[]models.UsageNote{models.UsageNote{Title: "data_marking", Note: "this marks the observation with a special character"}} count := 0 - mockRowReader := &observationtest.CSVRowReaderMock{ + mockRowReader := &observationtest.StreamRowReaderMock{ ReadFunc: func() (string, error) { count++ if count == 1 { @@ -1105,7 +1105,7 @@ func TestGetObservationAuditSuccessfulError(t *testing.T) { usagesNotes := &[]models.UsageNote{models.UsageNote{Title: "data_marking", Note: "this marks the obsevation with a special character"}} count := 0 - mockRowReader := &observationtest.CSVRowReaderMock{ + mockRowReader := &observationtest.StreamRowReaderMock{ ReadFunc: func() (string, error) { count++ if count == 1 { From e24db35bc5d268218b1fd05af0802ac9ece79a50 Mon Sep 17 00:00:00 2001 From: Eleanor Deal Date: Fri, 2 Aug 2019 
18:01:51 +0100 Subject: [PATCH 03/15] Update vendor of dp-graph and gremgo for changes to streaming/cursor --- .../dp-graph/neptune/driver/neptunepool.go | 5 +- .../ONSdigital/dp-graph/neptune/hierarchy.go | 61 +++-- .../neptune/internal/mockpoolutils.go | 16 +- .../dp-graph/neptune/internal/pool.go | 64 ++--- .../ONSdigital/dp-graph/neptune/mapper.go | 65 ++++- .../ONSdigital/dp-graph/neptune/neptune.go | 3 +- .../dp-graph/neptune/observation.go | 60 ++++- .../dp-graph/neptune/query/query.go | 16 +- .../ONSdigital/graphson/deserialize.go | 249 ++++++++++++++++++ .../github.com/ONSdigital/graphson/types.go | 153 +++++++++++ .../github.com/ONSdigital/graphson/utils.go | 246 +++++++++++++++++ .../ONSdigital/graphson/validation_utils.go | 94 +++++++ .../ONSdigital/gremgo-neptune/Makefile | 4 +- .../ONSdigital/gremgo-neptune/client.go | 21 +- .../ONSdigital/gremgo-neptune/cursor.go | 79 +++--- .../ONSdigital/gremgo-neptune/go.mod | 2 +- .../ONSdigital/gremgo-neptune/go.sum | 8 +- .../ONSdigital/gremgo-neptune/pool.go | 17 +- vendor/vendor.json | 76 +++--- 19 files changed, 1072 insertions(+), 167 deletions(-) create mode 100644 vendor/github.com/ONSdigital/graphson/deserialize.go create mode 100644 vendor/github.com/ONSdigital/graphson/types.go create mode 100644 vendor/github.com/ONSdigital/graphson/utils.go create mode 100644 vendor/github.com/ONSdigital/graphson/validation_utils.go diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go index 25596b99..f053dd51 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go @@ -3,6 +3,7 @@ package driver import ( "context" + "github.com/ONSdigital/graphson" gremgo "github.com/ONSdigital/gremgo-neptune" ) @@ -15,9 +16,9 @@ connection Pool 
by the Neptune.Driver. type NeptunePool interface { Close() Execute(query string, bindings, rebindings map[string]string) (resp []gremgo.Response, err error) - Get(query string, bindings, rebindings map[string]string) (resp interface{}, err error) + Get(query string, bindings, rebindings map[string]string) ([]graphson.Vertex, error) GetCount(q string, bindings, rebindings map[string]string) (i int64, err error) GetE(q string, bindings, rebindings map[string]string) (resp interface{}, err error) - OpenCursorCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (cursor *gremgo.Cursor, err error) + OpenStreamCursor(ctx context.Context, query string, bindings, rebindings map[string]string) (stream *gremgo.Stream, err error) GetStringList(query string, bindings, rebindings map[string]string) (vals []string, err error) } diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go b/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go index 6617bed0..394b184c 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go @@ -4,10 +4,11 @@ import ( "context" "fmt" + "github.com/ONSdigital/dp-graph/graph/driver" "github.com/ONSdigital/dp-graph/neptune/query" "github.com/ONSdigital/dp-hierarchy-api/models" "github.com/ONSdigital/go-ns/log" - "github.com/gedge/graphson" + "github.com/ONSdigital/graphson" ) func (n *NeptuneDB) CreateInstanceHierarchyConstraints(ctx context.Context, attempt int, instanceID, dimensionName string) error { @@ -31,7 +32,7 @@ func (n *NeptuneDB) CloneNodes(ctx context.Context, attempt int, instanceID, cod log.Debug("cloning nodes from the generic hierarchy", logData) if _, err = n.getVertices(gremStmt); err != nil { - log.ErrorC("get", err, logData) + log.ErrorC("cannot get vertices during cloning", err, logData) return } @@ -49,7 +50,7 @@ func (n 
*NeptuneDB) CountNodes(ctx context.Context, instanceID, dimensionName st log.Debug("counting nodes in the new instance hierarchy", logData) if count, err = n.getNumber(gremStmt); err != nil { - log.ErrorC("getNumber", err, logData) + log.ErrorC("cannot count nodes in a hierarchy", err, logData) return } return @@ -74,7 +75,7 @@ func (n *NeptuneDB) CloneRelationships(ctx context.Context, attempt int, instanc log.Debug("cloning relationships from the generic hierarchy", logData) if _, err = n.getEdges(gremStmt); err != nil { - log.ErrorC("getEdges", err, logData) + log.ErrorC("cannot find edges while cloning relationships", err, logData) return } @@ -96,7 +97,7 @@ func (n *NeptuneDB) RemoveCloneEdges(ctx context.Context, attempt int, instanceI log.Debug("removing edges to generic hierarchy", logData) if _, err = n.exec(gremStmt); err != nil { - log.ErrorC("exec", err, logData) + log.ErrorC("exec failed while removing edges during removal of unwanted cloned edges", err, logData) return } return @@ -119,7 +120,7 @@ func (n *NeptuneDB) SetNumberOfChildren(ctx context.Context, attempt int, instan log.Debug("setting number-of-children property value on the instance hierarchy nodes", logData) if _, err = n.getVertices(gremStmt); err != nil { - log.ErrorC("getV", err, logData) + log.ErrorC("cannot find vertices while settting nChildren on hierarchy nodes", err, logData) return } @@ -144,7 +145,7 @@ func (n *NeptuneDB) SetHasData(ctx context.Context, attempt int, instanceID, dim log.Debug("setting has-data property on the instance hierarchy", logData) if _, err = n.getVertices(gremStmt); err != nil { - log.ErrorC("getV", err, logData) + log.ErrorC("cannot find vertices while setting hasData on hierarchy nodes", err, logData) return } @@ -168,7 +169,7 @@ func (n *NeptuneDB) MarkNodesToRemain(ctx context.Context, attempt int, instance log.Debug("marking nodes to remain after trimming sparse branches", logData) if _, err = n.getVertices(gremStmt); err != nil { - 
log.ErrorC("getV", err, logData) + log.ErrorC("cannot find vertices while marking hierarchy nodes to keep", err, logData) return } @@ -186,7 +187,7 @@ func (n *NeptuneDB) RemoveNodesNotMarkedToRemain(ctx context.Context, attempt in log.Debug("removing nodes not marked to remain after trimming sparse branches", logData) if _, err = n.exec(gremStmt); err != nil { - log.ErrorC("exec", err, logData) + log.ErrorC("exec query failed while removing hierarchy nodes to cull", err, logData) return } return @@ -203,7 +204,7 @@ func (n *NeptuneDB) RemoveRemainMarker(ctx context.Context, attempt int, instanc log.Debug("removing the remain property from the nodes that remain", logData) if _, err = n.exec(gremStmt); err != nil { - log.ErrorC("exec", err, logData) + log.ErrorC("exec query failed while removing spent remain markers from hierarchy nodes", err, logData) return } return @@ -220,11 +221,11 @@ func (n *NeptuneDB) GetHierarchyCodelist(ctx context.Context, instanceID, dimens var vertex graphson.Vertex if vertex, err = n.getVertex(gremStmt); err != nil { - log.ErrorC("get", err, logData) + log.ErrorC("cannot get vertices while searching for code list node related to hierarchy node", err, logData) return } if codelistID, err = vertex.GetProperty("code_list"); err != nil { - log.ErrorC("bad prop", err, logData) + log.ErrorC("cannot read code_list property from node", err, logData) return } return @@ -239,13 +240,29 @@ func (n *NeptuneDB) GetHierarchyRoot(ctx context.Context, instanceID, dimension "dimension_name": dimension, } - var vertex graphson.Vertex - if vertex, err = n.getVertex(gremStmt); err != nil { - log.ErrorC("get", err, logData) + var vertices []graphson.Vertex + if vertices, err = n.getVertices(gremStmt); err != nil { + log.ErrorC("getVertices failed: cannot find hierarchy root node candidates ", err, logData) + return + } + if len(vertices) == 0 { + err = driver.ErrNotFound + log.ErrorC("Cannot find hierarchy root node", err, logData) return } - if node, err 
= n.convertVertexToResponse(vertex, instanceID, dimension); err != nil { - log.ErrorC("conv", err, logData) + if len(vertices) > 1 { + err = driver.ErrMultipleFound + log.ErrorC("Cannot identify hierarchy root node because are multiple candidates", err, logData) + return + } + var vertex graphson.Vertex + vertex = vertices[0] + // Note the call to buildHierarchyNodeFromGraphsonVertex below does much more than meets the eye, + // including launching new queries in of itself to fetch child nodes, and + // breadcrumb nodes. + wantBreadcrumbs := false // Because meaningless for a root node + if node, err = n.buildHierarchyNodeFromGraphsonVertex(vertex, instanceID, dimension, wantBreadcrumbs); err != nil { + log.ErrorC("Cannot extract related information needed from hierarchy node", err, logData) return } return @@ -263,11 +280,15 @@ func (n *NeptuneDB) GetHierarchyElement(ctx context.Context, instanceID, dimensi var vertex graphson.Vertex if vertex, err = n.getVertex(gremStmt); err != nil { - log.ErrorC("get", err, logData) + log.ErrorC("Cannot find vertex when looking for specific hierarchy node", err, logData) return } - if node, err = n.convertVertexToResponse(vertex, instanceID, dimension); err != nil { - log.ErrorC("conv", err, logData) + // Note the call to buildHierarchyNodeFromGraphsonVertex below does much more than meets the eye, + // including launching new queries in of itself to fetch child nodes, and + // breadcrumb nodes. 
+ wantBreadcrumbs := true // Because we are at depth in the hierarchy + if node, err = n.buildHierarchyNodeFromGraphsonVertex(vertex, instanceID, dimension, wantBreadcrumbs); err != nil { + log.ErrorC("Cannot extract related information needed from hierarchy node", err, logData) return } return diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go index c63afc1f..5514aa2a 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go @@ -3,7 +3,7 @@ package internal import ( "fmt" - "github.com/gedge/graphson" + "github.com/ONSdigital/graphson" ) /* @@ -41,9 +41,9 @@ var ReturnMalformedIntRequestErr = func(q string, bindings, rebindings map[strin } // ReturnMalformedNilInterfaceRequestErr is a mock implementation for -// NeptunePool functions that return (Interface{}, error) which always returns an +// NeptunePool functions that return ([]graphson.Vertex, error) which always returns an // error that is judged to be not transient by neptune.isTransientError -var ReturnMalformedNilInterfaceRequestErr = func(q string, bindings, rebindings map[string]string) (interface{}, error) { +var ReturnMalformedNilInterfaceRequestErr = func(q string, bindings, rebindings map[string]string) ([]graphson.Vertex, error) { return nil, errors.New(" MALFORMED REQUEST ") } @@ -59,7 +59,7 @@ var ReturnMalformedStringListRequestErr = func(q string, bindings, rebindings ma // - of type "_code_list" // - with a "listID" property set to "listID_0", "listID_1", and "ListID_2" respectively. 
// - with an "edition" property set to "my-test-edition" -var ReturnThreeCodeLists = func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { +var ReturnThreeCodeLists = func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { codeLists := []graphson.Vertex{} for i := 0; i < 3; i++ { vertex := makeCodeListVertex(i, "my-test-edition") @@ -72,7 +72,7 @@ var ReturnThreeCodeLists = func(query string, bindings map[string]string, rebind // returns a slice of three graphson.Vertex(s): // - of type "unused-vertex-type" // - with a an "edition" property set to "edition_0", "edition_1", and "edition_2" respectively. -var ReturnThreeEditionVertices = func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { +var ReturnThreeEditionVertices = func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { editions := []graphson.Vertex{} for i := 0; i < 3; i++ { vertex := makeVertex("unused-vertex-type") @@ -86,7 +86,7 @@ var ReturnThreeEditionVertices = func(query string, bindings map[string]string, // returns a slice of three graphson.Vertex(s): // - of type "unused-vertex-type" // - with a "value" property set to "code_0", "code_1", and "code_2" respectively. 
-var ReturnThreeCodeVertices = func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { +var ReturnThreeCodeVertices = func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { codes := []graphson.Vertex{} for i := 0; i < 3; i++ { vertex := makeVertex("unused-vertex-type") @@ -99,7 +99,7 @@ var ReturnThreeCodeVertices = func(query string, bindings map[string]string, reb // ReturnThreeUselessVertices is mock implementation for NeptunePool.Get() that always // returns a slice of three graphson.Vertex(s) of type "_useless_vertex_type", and with // no properties set. -var ReturnThreeUselessVertices = func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { +var ReturnThreeUselessVertices = func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { codeLists := []graphson.Vertex{} for i := 0; i < 3; i++ { vertex := makeVertex("_useless_vertex_type") @@ -109,7 +109,7 @@ var ReturnThreeUselessVertices = func(query string, bindings map[string]string, } // ReturnZeroVertices provides an empty list of graphson.Vertex(s) -var ReturnZeroVertices = func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { +var ReturnZeroVertices = func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { return []graphson.Vertex{}, nil } diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go index 61dbf0ff..ec532750 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go @@ -5,19 +5,21 @@ package internal import ( "context" + "sync" + "github.com/ONSdigital/dp-graph/neptune/driver" + 
"github.com/ONSdigital/graphson" "github.com/ONSdigital/gremgo-neptune" - "sync" ) var ( - lockNeptunePoolMockClose sync.RWMutex - lockNeptunePoolMockExecute sync.RWMutex - lockNeptunePoolMockGet sync.RWMutex - lockNeptunePoolMockGetCount sync.RWMutex - lockNeptunePoolMockGetE sync.RWMutex - lockNeptunePoolMockGetStringList sync.RWMutex - lockNeptunePoolMockOpenCursorCtx sync.RWMutex + lockNeptunePoolMockClose sync.RWMutex + lockNeptunePoolMockExecute sync.RWMutex + lockNeptunePoolMockGet sync.RWMutex + lockNeptunePoolMockGetCount sync.RWMutex + lockNeptunePoolMockGetE sync.RWMutex + lockNeptunePoolMockGetStringList sync.RWMutex + lockNeptunePoolMockOpenStreamCursor sync.RWMutex ) // Ensure, that NeptunePoolMock does implement NeptunePool. @@ -36,7 +38,7 @@ var _ driver.NeptunePool = &NeptunePoolMock{} // ExecuteFunc: func(query string, bindings map[string]string, rebindings map[string]string) ([]gremgo.Response, error) { // panic("mock out the Execute method") // }, -// GetFunc: func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { +// GetFunc: func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { // panic("mock out the Get method") // }, // GetCountFunc: func(q string, bindings map[string]string, rebindings map[string]string) (int64, error) { @@ -48,8 +50,8 @@ var _ driver.NeptunePool = &NeptunePoolMock{} // GetStringListFunc: func(query string, bindings map[string]string, rebindings map[string]string) ([]string, error) { // panic("mock out the GetStringList method") // }, -// OpenCursorCtxFunc: func(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Cursor, error) { -// panic("mock out the OpenCursorCtx method") +// OpenStreamCursorFunc: func(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Stream, error) { +// panic("mock out the OpenStreamCursor 
method") // }, // } // @@ -65,7 +67,7 @@ type NeptunePoolMock struct { ExecuteFunc func(query string, bindings map[string]string, rebindings map[string]string) ([]gremgo.Response, error) // GetFunc mocks the Get method. - GetFunc func(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) + GetFunc func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) // GetCountFunc mocks the GetCount method. GetCountFunc func(q string, bindings map[string]string, rebindings map[string]string) (int64, error) @@ -76,8 +78,8 @@ type NeptunePoolMock struct { // GetStringListFunc mocks the GetStringList method. GetStringListFunc func(query string, bindings map[string]string, rebindings map[string]string) ([]string, error) - // OpenCursorCtxFunc mocks the OpenCursorCtx method. - OpenCursorCtxFunc func(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Cursor, error) + // OpenStreamCursorFunc mocks the OpenStreamCursor method. + OpenStreamCursorFunc func(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Stream, error) // calls tracks calls to the methods. calls struct { @@ -129,8 +131,8 @@ type NeptunePoolMock struct { // Rebindings is the rebindings argument value. Rebindings map[string]string } - // OpenCursorCtx holds details about calls to the OpenCursorCtx method. - OpenCursorCtx []struct { + // OpenStreamCursor holds details about calls to the OpenStreamCursor method. + OpenStreamCursor []struct { // Ctx is the ctx argument value. Ctx context.Context // Query is the query argument value. @@ -209,7 +211,7 @@ func (mock *NeptunePoolMock) ExecuteCalls() []struct { } // Get calls GetFunc. 
-func (mock *NeptunePoolMock) Get(query string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { +func (mock *NeptunePoolMock) Get(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { if mock.GetFunc == nil { panic("NeptunePoolMock.GetFunc: method is nil but NeptunePool.Get was just called") } @@ -364,10 +366,10 @@ func (mock *NeptunePoolMock) GetStringListCalls() []struct { return calls } -// OpenCursorCtx calls OpenCursorCtxFunc. -func (mock *NeptunePoolMock) OpenCursorCtx(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Cursor, error) { - if mock.OpenCursorCtxFunc == nil { - panic("NeptunePoolMock.OpenCursorCtxFunc: method is nil but NeptunePool.OpenCursorCtx was just called") +// OpenStreamCursor calls OpenStreamCursorFunc. +func (mock *NeptunePoolMock) OpenStreamCursor(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Stream, error) { + if mock.OpenStreamCursorFunc == nil { + panic("NeptunePoolMock.OpenStreamCursorFunc: method is nil but NeptunePool.OpenStreamCursor was just called") } callInfo := struct { Ctx context.Context @@ -380,16 +382,16 @@ func (mock *NeptunePoolMock) OpenCursorCtx(ctx context.Context, query string, bi Bindings: bindings, Rebindings: rebindings, } - lockNeptunePoolMockOpenCursorCtx.Lock() - mock.calls.OpenCursorCtx = append(mock.calls.OpenCursorCtx, callInfo) - lockNeptunePoolMockOpenCursorCtx.Unlock() - return mock.OpenCursorCtxFunc(ctx, query, bindings, rebindings) + lockNeptunePoolMockOpenStreamCursor.Lock() + mock.calls.OpenStreamCursor = append(mock.calls.OpenStreamCursor, callInfo) + lockNeptunePoolMockOpenStreamCursor.Unlock() + return mock.OpenStreamCursorFunc(ctx, query, bindings, rebindings) } -// OpenCursorCtxCalls gets all the calls that were made to OpenCursorCtx. 
+// OpenStreamCursorCalls gets all the calls that were made to OpenStreamCursor. // Check the length with: -// len(mockedNeptunePool.OpenCursorCtxCalls()) -func (mock *NeptunePoolMock) OpenCursorCtxCalls() []struct { +// len(mockedNeptunePool.OpenStreamCursorCalls()) +func (mock *NeptunePoolMock) OpenStreamCursorCalls() []struct { Ctx context.Context Query string Bindings map[string]string @@ -401,8 +403,8 @@ func (mock *NeptunePoolMock) OpenCursorCtxCalls() []struct { Bindings map[string]string Rebindings map[string]string } - lockNeptunePoolMockOpenCursorCtx.RLock() - calls = mock.calls.OpenCursorCtx - lockNeptunePoolMockOpenCursorCtx.RUnlock() + lockNeptunePoolMockOpenStreamCursor.RLock() + calls = mock.calls.OpenStreamCursor + lockNeptunePoolMockOpenStreamCursor.RUnlock() return calls } diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go b/vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go index 925171e8..c47c8a50 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go @@ -1,20 +1,30 @@ package neptune +/* +This module is dedicated to the needs of the hierarchy API. 
+*/ + import ( "fmt" "github.com/ONSdigital/dp-graph/neptune/query" "github.com/ONSdigital/dp-hierarchy-api/models" "github.com/ONSdigital/go-ns/log" - "github.com/gedge/graphson" + "github.com/ONSdigital/graphson" ) -func (n *NeptuneDB) convertVertexToResponse(v graphson.Vertex, instanceID, dimension string) (res *models.Response, err error) { - logData := log.Data{"fn": "convertVertexToResponse"} +func (n *NeptuneDB) buildHierarchyNodeFromGraphsonVertex(v graphson.Vertex, instanceID, dimension string, wantBreadcrumbs bool) (res *models.Response, err error) { + logData := log.Data{"fn": "buildHierarchyNodeFromGraphsonVertex"} - res = &models.Response{ - ID: v.GetID(), + res = &models.Response{} + // Note we are using the vertex' *code* property for the response model's + // ID field - because in the case of a hierarchy node, this is the ID + // used to format links. + if res.ID, err = v.GetProperty("code"); err != nil { + log.ErrorC("bad GetProp code", err, logData) + return } + if res.Label, err = v.GetLabel(); err != nil { log.ErrorC("bad label", err, logData) return @@ -27,6 +37,7 @@ func (n *NeptuneDB) convertVertexToResponse(v graphson.Vertex, instanceID, dimen log.ErrorC("bad hasData", err, logData) return } + // Fetch new data from the database concerned with the node's children. if res.NoOfChildren > 0 && instanceID != "" { var code string if code, err = v.GetProperty("code"); err != nil { @@ -57,14 +68,52 @@ func (n *NeptuneDB) convertVertexToResponse(v graphson.Vertex, instanceID, dimen res.Children = append(res.Children, childElement) } } + // Fetch new data from the database concerned with the node's breadcrumbs. + if wantBreadcrumbs { + res.Breadcrumbs, err = n.buildBreadcrumbs(instanceID, dimension, res.ID) + if err != nil { + log.ErrorC("building breadcrumbs", err, logData) + } + } return } +/* +buildBreadcrumbs launches a new query to the database, to trace the (recursive) +parentage of a hierarcy node. 
It converts the returned chain of parent +graphson vertices into a chain of models.Element, and returns this list of +elements. +*/ +func (n *NeptuneDB) buildBreadcrumbs(instanceID, dimension, code string) ([]*models.Element, error) { + logData := log.Data{"fn": "buildBreadcrumbs"} + gremStmt := fmt.Sprintf(query.GetAncestry, instanceID, dimension, code) + logData["statement"] = gremStmt + ancestorVertices, err := n.getVertices(gremStmt) + if err != nil { + log.ErrorC("getVertices", err, logData) + return nil, err + } + elements := []*models.Element{} + for _, ancestor := range ancestorVertices { + element, err := convertVertexToElement(ancestor) + if err != nil { + log.ErrorC("convertVertexToElement", err, logData) + return nil, err + } + elements = append(elements, element) + } + return elements, nil +} + func convertVertexToElement(v graphson.Vertex) (res *models.Element, err error) { logData := log.Data{"fn": "convertVertexToElement"} - - res = &models.Element{ - ID: v.GetID(), + res = &models.Element{} + // Note we are using the vertex' *code* property for the response model's + // ID field - because in the case of a hierarchy node, this is the ID + // used to format links. 
+ if res.ID, err = v.GetProperty("code"); err != nil { + log.ErrorC("bad GetProp code", err, logData) + return } if res.Label, err = v.GetLabel(); err != nil { diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go b/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go index fdc89651..453cce87 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go @@ -11,7 +11,7 @@ import ( "github.com/ONSdigital/dp-graph/neptune/driver" "github.com/ONSdigital/go-ns/log" - "github.com/gedge/graphson" + "github.com/ONSdigital/graphson" ) type NeptuneDB struct { @@ -106,7 +106,6 @@ func (n *NeptuneDB) getStringList(gremStmt string) (strings []string, err error) return } - func (n *NeptuneDB) getVertex(gremStmt string) (vertex graphson.Vertex, err error) { logData := log.Data{"fn": "getVertex", "statement": gremStmt} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go b/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go index 9aa1c045..93c28dd0 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go @@ -2,24 +2,27 @@ package neptune import ( "context" - "errors" "fmt" "strings" + "time" + + "github.com/pkg/errors" "github.com/ONSdigital/dp-graph/neptune/query" "github.com/ONSdigital/dp-graph/observation" "github.com/ONSdigital/dp-observation-importer/models" + "github.com/ONSdigital/go-ns/log" ) -// ErrEmptyFilter is returned if the provided filter is empty. -var ErrEmptyFilter = errors.New("filter is empty") +// ErrInvalidFilter is returned if the provided filter is nil. 
+var ErrInvalidFilter = errors.New("nil filter cannot be processed") func (n *NeptuneDB) StreamCSVRows(ctx context.Context, filter *observation.Filter, limit *int) (observation.StreamRowReader, error) { if filter == nil { - return nil, ErrEmptyFilter + return nil, ErrInvalidFilter } - q := fmt.Sprintf(query.GetInstanceHeader, filter.InstanceID) + q := fmt.Sprintf(query.GetInstanceHeaderPart, filter.InstanceID) q += buildObservationsQuery(filter) q += query.GetObservationSelectRowPart @@ -28,7 +31,7 @@ func (n *NeptuneDB) StreamCSVRows(ctx context.Context, filter *observation.Filte q += fmt.Sprintf(query.LimitPart, *limit) } - return n.Pool.OpenCursorCtx(ctx, q, nil, nil) + return n.Pool.OpenStreamCursor(ctx, q, nil, nil) } func buildObservationsQuery(f *observation.Filter) string { @@ -57,6 +60,51 @@ func buildObservationsQuery(f *observation.Filter) string { return q } +// TODO: this global state is only used for metrics, not actual code flow, +// but should be revisited before production use +var batchCount = 0 +var totalTime time.Time + func (n *NeptuneDB) InsertObservationBatch(ctx context.Context, attempt int, instanceID string, observations []*models.Observation, dimensionIDs map[string]string) error { + if len(observations) == 0 { + fmt.Println("range should be empty") + return nil + } + + c := batchCount + batchCount++ + batchStart := time.Now() + if totalTime.IsZero() { + totalTime = batchStart + } else { + log.Info("opening batch", log.Data{"size": len(observations), "batchID": c}) + } + + var create string + for _, o := range observations { + create += fmt.Sprintf(query.DropObservationRelationships, instanceID, o.Row) + create += fmt.Sprintf(query.DropObservation, instanceID, o.Row) + create += fmt.Sprintf(query.CreateObservationPart, instanceID, o.Row, o.RowIndex) + for _, d := range o.DimensionOptions { + dimensionName := strings.ToLower(d.DimensionName) + dimensionLookup := instanceID + "_" + dimensionName + "_" + d.Name + + nodeID, ok := 
dimensionIDs[dimensionLookup] + if !ok { + return fmt.Errorf("no nodeID [%s] found in dimension map", dimensionLookup) + } + + create += fmt.Sprintf(query.AddObservationRelationshipPart, nodeID, instanceID, d.DimensionName, d.Name) + } + + create = strings.TrimSuffix(create, ".outV()") + create += ".iterate() " + } + + if _, err := n.exec(create); err != nil { + return err + } + + log.Info("batch complete", log.Data{"batchID": c, "elapsed": time.Since(totalTime), "batchTime": time.Since(batchStart)}) return nil } diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go b/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go index 76c95f8f..259adba9 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go @@ -47,6 +47,8 @@ const ( CloneHierarchyNodes = "g.V().hasLabel('_generic_hierarchy_node_%s').as('old')" + ".addV('_hierarchy_node_%s_%s')" + ".property('code',select('old').values('code'))" + + ".property('label',select('old').values('label'))" + + ".property(single, 'hasData', false)" + ".property('code_list','%s').as('new')" + ".addE('clone_of').to('old').select('new')" CountHierarchyNodes = "g.V().hasLabel('_hierarchy_node_%s_%s').count()" @@ -66,10 +68,11 @@ const ( // hierarchy read HierarchyExists = "g.V().hasLabel('_hierarchy_node_%s_%s').limit(1)" - GetHierarchyRoot = "g.V().hasLabel('_hierarchy_node_%s_%s').not(outE('hasParent')).limit(1)" + GetHierarchyRoot = "g.V().hasLabel('_hierarchy_node_%s_%s').not(outE('hasParent'))" GetHierarchyElement = "g.V().hasLabel('_hierarchy_node_%s_%s').has('code','%s')" GetChildren = "g.V().hasLabel('_hierarchy_node_%s_%s').has('code','%s').in('hasParent').order().by('label')" - GetAncestry = "g.V().hasLabel('_hierarchy_node_%s_%s').has('code','%s').out('hasParent')" + // Note this query is recursive + GetAncestry = "g.V().hasLabel('_hierarchy_node_%s_%s').has('code', 
'%s').repeat(out('hasParent')).emit()" // instance - import process CreateInstance = "g.addV('_%s_Instance').property(single,'header','%s')" @@ -86,11 +89,16 @@ const ( CreateDimensionToInstanceRelationship = "g.addV('_%s_%s').property('value','%s').as('d').addE('HAS_DIMENSION').from(V().hasLabel('_%s_Instance')).select('d').by(id)" // observation - GetInstanceHeader = "g.V().hasLabel('_%s_Instance').as('instance')" + DropObservationRelationships = "g.V().hasLabel('_%s_observation').has('value', '%s').bothE().drop().iterate()" + DropObservation = "g.V().hasLabel('_%s_observation').has('value', '%s').drop().iterate()" + CreateObservationPart = "g.addV('_%s_observation').property(single, 'value', '%s').property(single, 'rowIndex', '%d')" + AddObservationRelationshipPart = ".addE('isValueOf').to(V().hasId('%s').hasLabel('_%s_%s').where(values('value').is('%s'))).outV()" + + GetInstanceHeaderPart = "g.V().hasLabel('_%s_Instance').as('instance')" GetAllObservationsPart = ".V().hasLabel('_%s_observation').values('row')" GetObservationsPart = ".V().hasLabel('_%s_observation').match(" GetObservationDimensionPart = "__.as('row').out('isValueOf').hasLabel('_%s_%s').where(values('value').is(within(%s)))" GetObservationSelectRowPart = ".select('instance', 'row').by('header').by('row').unfold().dedup().select(values)" - LimitPart = ".limit(%v)" + LimitPart = ".limit(%d)" ) diff --git a/vendor/github.com/ONSdigital/graphson/deserialize.go b/vendor/github.com/ONSdigital/graphson/deserialize.go new file mode 100644 index 00000000..33e13ca9 --- /dev/null +++ b/vendor/github.com/ONSdigital/graphson/deserialize.go @@ -0,0 +1,249 @@ +package graphson + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" +) + +// DeserializeVertices converts a graphson string to a slice of Vertex +func DeserializeVertices(rawResponse string) ([]Vertex, error) { + // TODO: empty strings for property values will cause invalid json + // make so it can handle that case + if 
len(rawResponse) == 0 { + return []Vertex{}, nil + } + return DeserializeVerticesFromBytes([]byte(rawResponse)) +} + +// DeserializeVerticesFromBytes returns a slice of Vertex from the graphson rawResponse list of vertex +func DeserializeVerticesFromBytes(rawResponse []byte) ([]Vertex, error) { + // TODO: empty strings for property values will cause invalid json + // make so it can handle that case + var response []Vertex + if len(rawResponse) == 0 { + return response, nil + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err := dec.Decode(&response); err != nil { + return nil, err + } + return response, nil +} + +// DeserializeListOfVerticesFromBytes returns a slice of Vertex from the graphson rawResponse g:List of vertex +func DeserializeListOfVerticesFromBytes(rawResponse []byte) ([]Vertex, error) { + var metaResponse ListVertices + var response []Vertex + if len(rawResponse) == 0 { + return response, nil + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err := dec.Decode(&metaResponse); err != nil { + return nil, err + } + + if metaResponse.Type != "g:List" { + return response, fmt.Errorf("DeserializeListOfVerticesFromBytes: Expected `g:List` type, but got %q", metaResponse.Type) + } + + return metaResponse.Value, nil +} + +func DeserializeListOfEdgesFromBytes(rawResponse []byte) (Edges, error) { + var metaResponse ListEdges + var response Edges + if len(rawResponse) == 0 { + return response, nil + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + err := dec.Decode(&metaResponse) + if err != nil { + return nil, err + } + + if metaResponse.Type != "g:List" { + return response, fmt.Errorf("DeserializeListOfEdgesFromBytes: Expected `g:List` type, but got %q", metaResponse.Type) + } + + return metaResponse.Value, nil +} + +func DeserializeMapFromBytes(rawResponse []byte) (resMap map[string]interface{}, err error) { + var metaResponse GList 
+ if len(rawResponse) == 0 { + return + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err = dec.Decode(&metaResponse); err != nil { + return nil, err + } + + if metaResponse.Type != "g:Map" { + return resMap, fmt.Errorf("DeserializeMapFromBytes: Expected `g:Map` type, but got %q", metaResponse.Type) + } + + return resMap, nil +} + +// DeserializePropertiesFromBytes is for converting vertex .properties() results into a map +func DeserializePropertiesFromBytes(rawResponse []byte, resMap map[string][]interface{}) (err error) { + var metaResponse GList + if len(rawResponse) == 0 { + return + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err = dec.Decode(&metaResponse); err != nil { + return + } + + if metaResponse.Type != "g:List" { + return fmt.Errorf("DeserializePropertiesFromBytes: Expected `g:List` type, but got %q", metaResponse.Type) + } + var props []VertexProperty + if err = json.Unmarshal(metaResponse.Value, &props); err != nil { + return + } + + for _, prop := range props { + if _, ok := resMap[prop.Value.Label]; !ok { + resMap[prop.Value.Label] = []interface{}{prop.Value.Value} + } else { + resMap[prop.Value.Label] = append(resMap[prop.Value.Label], prop.Value.Value) + } + } + + return +} + +// DeserializeStringListFromBytes get a g:List value which should be a a list of strings, return those +func DeserializeStringListFromBytes(rawResponse []byte) (vals []string, err error) { + var metaResponse GList + if len(rawResponse) == 0 { + err = errors.New("DeserializeStringListFromBytes: nothing to decode") + return + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err = dec.Decode(&metaResponse); err != nil { + return + } + + if metaResponse.Type != "g:List" { + err = fmt.Errorf("DeserializeStringListFromBytes: Expected `g:List` type, but got %q", metaResponse.Type) + return + } + + if err = json.Unmarshal(metaResponse.Value, 
&vals); err != nil { + return + } + return +} + +// DeserializeSingleFromBytes get a g:List value which should be a singular item, returns that item +func DeserializeSingleFromBytes(rawResponse []byte) (gV GenericValue, err error) { + var metaResponse GList + if len(rawResponse) == 0 { + err = errors.New("DeserializeSingleFromBytes: nothing to decode") + return + } + dec := json.NewDecoder(bytes.NewReader(rawResponse)) + dec.DisallowUnknownFields() + if err = dec.Decode(&metaResponse); err != nil { + return + } + + if metaResponse.Type != "g:List" { + err = fmt.Errorf("DeserializeSingleFromBytes: Expected `g:List` type, but got %q", metaResponse.Type) + return + } + + var genVals GenericValues + if genVals, err = DeserializeGenericValues(string(metaResponse.Value)); err != nil { + return + } + + if len(genVals) != 1 { + err = fmt.Errorf("DeserializeSingleFromBytes: Expected single value, got %d", len(genVals)) + return + } + + return genVals[0], nil +} + +// DeserializeNumber returns the count from the g:List'd database response +func DeserializeNumber(rawResponse []byte) (count int64, err error) { + var genVal GenericValue + if genVal, err = DeserializeSingleFromBytes(rawResponse); err != nil { + return + } + + if genVal.Type != "g:Int64" { + err = fmt.Errorf("DeserializeNumber: Expected `g:Int64` type, but got %q", genVal.Type) + return + } + count = int64(genVal.Value.(float64)) + return +} + +func DeserializeEdges(rawResponse string) (Edges, error) { + var response Edges + if rawResponse == "" { + return response, nil + } + err := json.Unmarshal([]byte(rawResponse), &response) + if err != nil { + return nil, err + } + return response, nil +} + +func DeserializeGenericValue(rawResponse string) (response GenericValue, err error) { + if len(rawResponse) == 0 { + return + } + if err = json.Unmarshal([]byte(rawResponse), &response); err != nil { + return + } + return +} + +func DeserializeGenericValues(rawResponse string) (GenericValues, error) { + var response 
GenericValues + if rawResponse == "" { + return response, nil + } + err := json.Unmarshal([]byte(rawResponse), &response) + if err != nil { + return nil, err + } + return response, nil +} + +func ConvertToCleanVertices(vertices []Vertex) []CleanVertex { + var responseVertices []CleanVertex + for _, vertex := range vertices { + responseVertices = append(responseVertices, CleanVertex{ + Id: vertex.Value.ID, + Label: vertex.Value.Label, + }) + } + return responseVertices +} + +func ConvertToCleanEdges(edges Edges) []CleanEdge { + var responseEdges []CleanEdge + for _, edge := range edges { + responseEdges = append(responseEdges, CleanEdge{ + Source: edge.Value.InV, + Target: edge.Value.OutV, + }) + } + return responseEdges +} diff --git a/vendor/github.com/ONSdigital/graphson/types.go b/vendor/github.com/ONSdigital/graphson/types.go new file mode 100644 index 00000000..028027eb --- /dev/null +++ b/vendor/github.com/ONSdigital/graphson/types.go @@ -0,0 +1,153 @@ +package graphson + +import "encoding/json" + +// cbi made up, not a real graphson or gremlin thing +// type GremlinResponse struct { +// V Vertices +// E Edges +// } + +type GList struct { + Type string `json:"@type"` + Value json.RawMessage `json:"@value"` +} + +// type GMap struct { +// Type string `json:"@type"` +// Value json.RawMessage `json:"@value"` +// } + +type ListVertices struct { + Type string `json:"@type"` + Value []Vertex `json:"@value"` +} +type ListEdges struct { + Type string `json:"@type"` + Value Edges `json:"@value"` +} + +// type Vertices []Vertex + +type Vertex struct { + Type string `json:"@type"` + Value VertexValue `json:"@value"` +} + +type VertexValue struct { + ID string `json:"id"` + Label string `json:"label"` + Properties map[string][]VertexProperty `json:"properties"` +} + +type VertexProperty struct { + Type string `json:"@type"` + Value VertexPropertyValue `json:"@value"` +} + +type EdgeProperty struct { + Type string `json:"@type"` + Value 
EdgePropertyValue `json:"@value"` +} + +type VertexPropertyValue struct { + ID GenericValue `json:"id"` + Label string `json:"label"` + Value interface{} `json:"value"` +} + +type EdgePropertyValue struct { + Label string `json:"key"` + // Value GenericValue `json:"value"` // this works when value is NOT a string + Value json.RawMessage `json:"value"` + // ValueStr string `json:"value"` + // Value interface{} `json:"value"` +} + +type GenericValues []GenericValue + +type GenericValue struct { + Type string `json:"@type"` + Value interface{} `json:"@value"` +} + +type Edges []Edge + +type Edge struct { + Type string `json:"@type"` + Value EdgeValue `json:"@value"` +} + +type EdgeValue struct { + ID string `json:"id"` + Label string `json:"label"` + InVLabel string `json:"inVLabel"` + OutVLabel string `json:"outVLabel"` + InV string `json:"inV"` + OutV string `json:"outV"` + Properties map[string]EdgeProperty `json:"properties"` +} + +// type CleanResponse struct { +// V []CleanVertex +// E []CleanEdge +// } + +type CleanEdge struct { + Source string `json:"source"` + Target string `json:"target"` +} + +type CleanVertex struct { + Id string `json:"id"` + Label string `json:"label"` +} + +// type MinVertex struct { +// ID string +// Label string +// Props map[string][]MinVertexProp +// } +// type MinVertexProp struct { +// // ID string +// Label string +// Value interface{} +// } + +// type UpsertVertexMap struct { +// Id string `json:""` +// Label string `json:"label"` +// } + +// type TypeID int + +// const ( +// TypeString TypeID = iota +// TypeBoolean +// TypeMap +// TypeCollection +// TypeClass +// TypeDate +// TypeDouble +// TypeFloat +// TypeInteger +// TypeLong +// TypeTimestamp +// TypeUUID +// TypeVertex +// TypeVertexProperty +// ) + +// const ( +// TypeStrDate = "g:Date" +// TypeStrDouble = "g:Double" +// TypeStrFloat = "g:Float" +// TypeStrInteger = "g:Int32" +// TypeStrLong = "g:Int64" +// TypeStrTimestamp = "g:Timestamp" +// TypeStrUUID = "g:UUID" +// 
TypeStrVertex = "g:Vertex" +// TypeStrVertexProperty = "g:VertexProperty" +// TypeStrProperty = "g:Property" +// TypeStrEdge = "g:Edge" +// ) diff --git a/vendor/github.com/ONSdigital/graphson/utils.go b/vendor/github.com/ONSdigital/graphson/utils.go new file mode 100644 index 00000000..7f55deed --- /dev/null +++ b/vendor/github.com/ONSdigital/graphson/utils.go @@ -0,0 +1,246 @@ +package graphson + +import ( + "errors" + "strings" +) + +var ( + ErrorPropertyNotFound = errors.New("property not found") + ErrorPropertyIsMeta = errors.New("meta-property found where multi-property expected") + ErrorPropertyIsMulti = errors.New("multi-property found where singleton expected") + ErrorUnexpectedPropertyType = errors.New("property value could not be cast into expected type") +) + +// GetID returns the string ID for the given vertex +func (v Vertex) GetID() string { + return v.Value.ID +} + +// GetLabels returns the []string labels for the given vertex +func (v Vertex) GetLabels() (labels []string) { + labels = append(labels, v.Value.Label) + if strings.Index(labels[0], "::") == -1 { + return + } + return strings.Split(labels[0], "::") +} + +// GetLabel returns the string label for the given vertex, or an error if >1 +func (v Vertex) GetLabel() (string, error) { + labels := v.GetLabels() + if len(labels) > 1 { + return "", errors.New("too many labels - expected one") + } + return labels[0], nil +} + +// GetMultiProperty returns the ([]string) values for the given property `key` +// will return an error if the property is not the correct type +func (v Vertex) GetMultiProperty(key string) (vals []string, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "string"); err != nil { + return + } + for _, val := range valsInterface { + vals = append(vals, val.(string)) + } + return +} + +// GetMultiPropertyBool returns the ([]bool) values for the given property `key` +// will return an error if the property is 
not the correct type +func (v Vertex) GetMultiPropertyBool(key string) (vals []bool, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "bool"); err != nil { + return + } + for _, val := range valsInterface { + vals = append(vals, val.(bool)) + } + return +} + +// GetMultiPropertyInt64 returns the ([]int64) values for the given property `key` +// will return an error if the property is not the correct type +func (v Vertex) GetMultiPropertyInt64(key string) (vals []int64, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "int64"); err != nil { + return + } + for _, val := range valsInterface { + vals = append(vals, val.(int64)) + } + return +} + +// GetMultiPropertyInt32 returns the ([]int32) values for the given property `key` +// will return an error if the property is not the correct type +func (v Vertex) GetMultiPropertyInt32(key string) (vals []int32, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "int32"); err != nil { + return + } + for _, val := range valsInterface { + vals = append(vals, val.(int32)) + } + return +} + +// GetMultiPropertyAs returns the values for the given property `key` as type `wantType` +// will return an error if the property is not a set of the given `wantType` (string, bool, int64) +func (v Vertex) GetMultiPropertyAs(key, wantType string) (vals []interface{}, err error) { + var valInterface []VertexProperty + var ok bool + if valInterface, ok = v.Value.Properties[key]; !ok { + err = ErrorPropertyNotFound + return + } + for _, prop := range valInterface { + if prop.Value.Label != key { + err = ErrorPropertyIsMulti + return + } + switch wantType { + + case "string": + var val string + if val, ok = prop.Value.Value.(string); !ok { + err = ErrorUnexpectedPropertyType + return + } + vals = append(vals, val) + case "bool": + var val bool + if val, ok = prop.Value.Value.(bool); !ok { + err = 
ErrorUnexpectedPropertyType + return + } + vals = append(vals, val) + case "int32": + var typeIf, valIf interface{} + if typeIf, ok = prop.Value.Value.(map[string]interface{})["@type"]; !ok || typeIf != "g:Int32" { + return vals, ErrorUnexpectedPropertyType + } + if valIf, ok = prop.Value.Value.(map[string]interface{})["@value"]; !ok { + return vals, ErrorUnexpectedPropertyType + } + var val float64 + if val, ok = valIf.(float64); !ok { + return vals, ErrorUnexpectedPropertyType + } + vals = append(vals, int32(val)) + case "int64": + typedPropValue := prop.Value.Value.(map[string]interface{}) + typeAsString, ok := typedPropValue["@type"] + if !ok || (typeAsString != "g:Int64" && typeAsString != "g:Int32") { + return vals, ErrorUnexpectedPropertyType + } + var valIf interface{} + if valIf, ok = prop.Value.Value.(map[string]interface{})["@value"]; !ok { + return vals, ErrorUnexpectedPropertyType + } + var val float64 + if val, ok = valIf.(float64); !ok { + return vals, ErrorUnexpectedPropertyType + } + vals = append(vals, int64(val)) + } + } + return +} + +// GetProperty returns the single string value for a given property `key` +// will return an error if the property is not a single string +func (v Vertex) GetProperty(key string) (val string, err error) { + var vals []string + if vals, err = v.GetMultiProperty(key); err != nil { + return + } + if len(vals) == 0 { + err = ErrorPropertyNotFound + return + } + if len(vals) > 1 { + err = ErrorPropertyIsMulti + return + } + return vals[0], nil +} + +// GetPropertyInt64 returns the single int64 value for a given property `key` +// will return an error if the property is not a single string +func (v Vertex) GetPropertyInt64(key string) (val int64, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "int64"); err != nil { + return + } + if len(valsInterface) == 0 { + err = ErrorPropertyNotFound + return + } + if len(valsInterface) > 1 { + err = ErrorPropertyIsMulti + return + 
} + return valsInterface[0].(int64), nil +} + +// GetPropertyInt32 returns the single int32 value for a given property `key` +// will return an error if the property is not a single string +func (v Vertex) GetPropertyInt32(key string) (val int32, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "int32"); err != nil { + return + } + if len(valsInterface) == 0 { + err = ErrorPropertyNotFound + return + } + if len(valsInterface) > 1 { + err = ErrorPropertyIsMulti + return + } + return valsInterface[0].(int32), nil +} + +// GetPropertyBool returns the single bool value for a given property `key` +// will return an error if the property is not a single string +func (v Vertex) GetPropertyBool(key string) (val bool, err error) { + var valsInterface []interface{} + if valsInterface, err = v.GetMultiPropertyAs(key, "bool"); err != nil { + return + } + if len(valsInterface) == 0 { + err = ErrorPropertyNotFound + return + } + if len(valsInterface) > 1 { + err = ErrorPropertyIsMulti + return + } + return valsInterface[0].(bool), nil +} + +// GetMetaProperty returns a map[string]string for the given property `key` +func (v Vertex) GetMetaProperty(key string) (metaMap map[string][]string, err error) { + var valInterface []VertexProperty + var ok bool + if valInterface, ok = v.Value.Properties[key]; !ok { + err = ErrorPropertyNotFound + return + } + for _, prop := range valInterface { + subKey := prop.Value.Label + var subVal string + if subVal, ok = prop.Value.Value.(string); !ok { + err = ErrorUnexpectedPropertyType + return + } + if metaMap == nil { + metaMap = make(map[string][]string) + } + metaMap[subKey] = append(metaMap[subKey], subVal) + } + return +} diff --git a/vendor/github.com/ONSdigital/graphson/validation_utils.go b/vendor/github.com/ONSdigital/graphson/validation_utils.go new file mode 100644 index 00000000..39d190c4 --- /dev/null +++ 
b/vendor/github.com/ONSdigital/graphson/validation_utils.go @@ -0,0 +1,94 @@ +package graphson + +import ( + "fmt" +) + +func EdgesMatch(edge1, edge2 Edge) (bool, string) { + if edge1.Type != edge2.Type { + return false, "type" + } + // if ok, reason := GenericValuesMatch(edge1.Value.ID, edge2.Value.ID); !ok { + if edge1.Value.ID != edge2.Value.ID { + return false, "id" // + reason + } + if edge1.Value.Label != edge2.Value.Label { + return false, "label" + } + // if ok, reason := GenericValuesMatch(edge1.Value.InV, edge2.Value.InV); !ok { + if edge1.Value.InV != edge2.Value.InV { + return false, "inv" // + reason + } + if edge1.Value.InVLabel != edge2.Value.InVLabel { + return false, "invlabel" + } + // if ok, reason := GenericValuesMatch(edge1.Value.OutV, edge2.Value.OutV); !ok { + if edge1.Value.OutV != edge2.Value.OutV { + return false, "outv" // + reason + } + if edge1.Value.OutVLabel != edge2.Value.OutVLabel { + return false, "outvlabel" + } + if len(edge1.Value.Properties) != len(edge2.Value.Properties) { + return false, "properties" + } + for label, edge1Props := range edge1.Value.Properties { + edge2Props := edge2.Value.Properties[label] + if edge1Props.Type != edge2Props.Type { + return false, "prop.type" + } + if edge1Props.Value.Label != edge2Props.Value.Label || + fmt.Sprintf("%v", edge1Props.Value.Label) != fmt.Sprintf("%v", edge2Props.Value.Label) { + return false, "prop.value" + } + } + return true, "" +} + +func VerticesMatch(vertex1, vertex2 Vertex) bool { + if vertex1.Type != vertex2.Type { + return false + } + if vertex1.Value.ID != vertex2.Value.ID { + return false + } + if vertex1.Value.Label != vertex2.Value.Label { + return false + } + if len(vertex1.Value.Properties) != len(vertex2.Value.Properties) { + return false + } + for label, vertex1Props := range vertex1.Value.Properties { + vertex2Props := vertex2.Value.Properties[label] + if len(vertex1Props) != len(vertex2Props) { + return false + + } + for i, vertex1PropsElement := range 
vertex1Props { + vertex2PropsElement := vertex2Props[i] + if vertex1PropsElement.Type != vertex2PropsElement.Type { + return false + } + if vertex1PropsElement.Value.ID.Type != vertex2PropsElement.Value.ID.Type || + fmt.Sprintf("%v", vertex1PropsElement.Value.ID.Value) != fmt.Sprintf("%v", vertex2PropsElement.Value.ID.Value) { + return false + } + if vertex1PropsElement.Value.Label != vertex2PropsElement.Value.Label { + return false + } + if fmt.Sprintf("%v", vertex1PropsElement.Value.Value) != fmt.Sprintf("%v", vertex2PropsElement.Value.Value) { + return false + } + } + } + return true +} + +func GenericValuesMatch(gv1, gv2 GenericValue) (bool, string) { + if gv1.Type != gv2.Type { + return false, "type" + } + gv1ValueString := fmt.Sprintf("%v", gv1.Value) + gv2ValueString := fmt.Sprintf("%v", gv2.Value) + return gv1ValueString == gv2ValueString, "value" +} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/Makefile b/vendor/github.com/ONSdigital/gremgo-neptune/Makefile index cfc07d91..26f49710 100644 --- a/vendor/github.com/ONSdigital/gremgo-neptune/Makefile +++ b/vendor/github.com/ONSdigital/gremgo-neptune/Makefile @@ -13,9 +13,9 @@ test: .PHONY: test-bench test-bench: - @go test -bench=. -race + @go test -v -bench=. -race .PHONY: gremlin gremlin: @docker build -t gremgo-neptune/gremlin-server -f ./Dockerfile.gremlin . 
- @docker run -p 8182:8182 -t gremgo-neptune/gremlin-server \ No newline at end of file + @docker run -p 8182:8182 -t gremgo-neptune/gremlin-server diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/client.go b/vendor/github.com/ONSdigital/gremgo-neptune/client.go index f95c0eac..26a230cd 100644 --- a/vendor/github.com/ONSdigital/gremgo-neptune/client.go +++ b/vendor/github.com/ONSdigital/gremgo-neptune/client.go @@ -10,7 +10,7 @@ import ( "sync" "time" - "github.com/gedge/graphson" + "github.com/ONSdigital/graphson" "github.com/pkg/errors" ) @@ -130,8 +130,7 @@ func (c *Client) executeRequestCursorCtx(ctx context.Context, query string, bind } cursor = &Cursor{ - ID: id, - client: c, + ID: id, } return } @@ -213,7 +212,21 @@ func (c *Client) deserializeResponseToVertices(resp []Response) (res []graphson. return } -// OpenCursorCtx initiates a query on the database, returning a cursor used to iterate over the results as they arrive +// OpenStreamCursor initiates a query on the database, returning a stream cursor used to iterate over the results as they arrive. +// The provided query must only return a string list, as the Read() function on Stream explicitly handles string values. +func (c *Client) OpenStreamCursor(ctx context.Context, query string, bindings, rebindings map[string]string) (*Stream, error) { + if c.conn.IsDisposed() { + return nil, ErrorConnectionDisposed + } + basicCursor, err := c.executeRequestCursorCtx(ctx, query, bindings, rebindings) + return &Stream{ + cursor: basicCursor, + client: c, + }, err +} + +// OpenCursorCtx initiates a query on the database, returning a cursor used to iterate over the results as they arrive. +// The provided query must return a vertex or list of vertices in order for ReadCursorCtx to correctly format the results. 
func (c *Client) OpenCursorCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (cursor *Cursor, err error) { if c.conn.IsDisposed() { err = ErrorConnectionDisposed diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/cursor.go b/vendor/github.com/ONSdigital/gremgo-neptune/cursor.go index 5351bc74..097850c1 100644 --- a/vendor/github.com/ONSdigital/gremgo-neptune/cursor.go +++ b/vendor/github.com/ONSdigital/gremgo-neptune/cursor.go @@ -3,83 +3,84 @@ package gremgo import ( "context" "io" - "sync" + "net/http" - "github.com/davecgh/go-spew/spew" - "github.com/gedge/graphson" + "github.com/ONSdigital/graphson" "github.com/pkg/errors" ) // Cursor allows for results to be iterated over as soon as available, rather than waiting for // a query to complete and all results to be returned in one block. type Cursor struct { - ID string - mu sync.RWMutex + ID string +} + +// Stream is a specific implementation of a Cursor, which iterates over results from a cursor but +// only works on queries which return a list of strings. This is designed for returning what would +// be considered 'rows' of data in other contexts. +type Stream struct { + cursor *Cursor eof bool buffer []string client *Client } -// Read a string response from the cursor, reading from the buffer of previously retrieved responses -// when possible. When the buffer is empty, Read uses the cursor's client to retrieve further -// responses from the database. As this function does not take context, a number of attempts -// is hardcoded in refillBuffer() to prevent an infinite wait for further responses. -func (c *Cursor) Read() (string, error) { - if len(c.buffer) == 0 { - if c.eof { +// Read a string response from the stream cursor, reading from the buffer of previously retrieved responses +// when possible. When the buffer is empty, Read uses the stream's client to retrieve further +// responses from the database. 
+func (s *Stream) Read() (string, error) { + if len(s.buffer) == 0 { + if s.eof { return "", io.EOF } - if err := c.refillBuffer(); err != nil { + if err := s.refillBuffer(); err != nil { return "", err } } - s := c.buffer[0] + "\n" - spew.Dump("cursor string: " + s) - - if len(c.buffer) > 1 { - c.buffer = c.buffer[1:] - } else { - c.buffer = []string{} - } + var row string + row, s.buffer = s.buffer[0], s.buffer[1:] + row += "\n" - return s, nil + return row, nil } -func (c *Cursor) refillBuffer() error { +func (s *Stream) refillBuffer() error { var resp []Response var err error - var attempts int - for resp == nil && !c.eof || attempts > 5 { //resp could be empty if reading too quickly - attempts++ - if resp, c.eof, err = c.client.retrieveNextResponseCtx(context.Background(), c); err != nil { - err = errors.Wrapf(err, "cursor.Read: %s", c.ID) - return err + for resp == nil && !s.eof { //resp could be empty if reading too quickly + if resp, s.eof, err = s.client.retrieveNextResponseCtx(context.Background(), s.cursor); err != nil { + return errors.Wrapf(err, "cursor.Read: %s", s.cursor.ID) + } + + if len(resp) > 1 { + return errors.New("too many results in cursor response") } - } - //gremlin has returned a validly formed 'no content' response - if len(resp) == 1 && &resp[0].Status != nil && resp[0].Status.Code == 204 { - return io.ErrUnexpectedEOF + //gremlin has returned a validly formed 'no content' response + if len(resp) == 1 && &resp[0].Status != nil && resp[0].Status.Code == http.StatusNoContent { + s.eof = true + return io.EOF + } } - if c.buffer, err = graphson.DeserializeStringListFromBytes(resp[0].Result.Data); err != nil { + if s.buffer, err = graphson.DeserializeStringListFromBytes(resp[0].Result.Data); err != nil { return err } - if len(c.buffer) == 0 { + if len(s.buffer) == 0 { return errors.New("no results deserialized") } return nil } -// Close satisfies the ReadCloser interface. 
The cursor does not need to close any -// resources, as the contained client holds the connection, and this is closed -// by the defered close in OpenCursorCtx -func (c *Cursor) Close(ctx context.Context) error { +// Close satisfies the Closer interface. The stream does not need to close any +// resources, as the contained client holds the connection and is responsible +// for closing its own resources. +func (s *Stream) Close(ctx context.Context) error { return nil } diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/go.mod b/vendor/github.com/ONSdigital/gremgo-neptune/go.mod index a9d49683..2ef7ab83 100644 --- a/vendor/github.com/ONSdigital/gremgo-neptune/go.mod +++ b/vendor/github.com/ONSdigital/gremgo-neptune/go.mod @@ -1,7 +1,7 @@ module github.com/ONSdigital/gremgo-neptune require ( - github.com/gedge/graphson v0.0.0-20190531092426-d39cb8fe4384 + github.com/ONSdigital/graphson v0.0.0-20190718134034-c13ceacd109d github.com/gofrs/uuid v3.2.0+incompatible github.com/gorilla/websocket v1.4.0 github.com/pkg/errors v0.8.1 diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/go.sum b/vendor/github.com/ONSdigital/gremgo-neptune/go.sum index a672d64d..c16ab732 100644 --- a/vendor/github.com/ONSdigital/gremgo-neptune/go.sum +++ b/vendor/github.com/ONSdigital/gremgo-neptune/go.sum @@ -1,5 +1,9 @@ -github.com/gedge/graphson v0.0.0-20190531092426-d39cb8fe4384 h1:WnFZkCrqH8PJFxQtp3EG0GKcEneNwqS3hzYDr6d7ctE= -github.com/gedge/graphson v0.0.0-20190531092426-d39cb8fe4384/go.mod h1:Ehgz7wAEVmSkFMIY2WFsi33IZXvzrgBVsro51AEIkq0= +github.com/ONSdigital/graphson v0.0.0-20190531092426-d39cb8fe4384 h1:tQOaBPntKLKJZYNTqT6YwE9fXZZLD0jBrke18nAJV5w= +github.com/ONSdigital/graphson v0.0.0-20190531092426-d39cb8fe4384/go.mod h1:zQ+8pTnCLGuy4eUek81pWUxZo4/f71ri3VYz97Wby+4= +github.com/ONSdigital/graphson 
v0.0.0-20190717101729-324718b3a644 h1:qlXGwzq+X2DUd0iOYmkXwnSxYDeU1efFwp7sUXASjO0= +github.com/ONSdigital/graphson v0.0.0-20190717101729-324718b3a644/go.mod h1:zQ+8pTnCLGuy4eUek81pWUxZo4/f71ri3VYz97Wby+4= +github.com/ONSdigital/graphson v0.0.0-20190718134034-c13ceacd109d h1:yrCtEGlohmA3OnXtke0nOOp/m9O83orpSnTGOfYOw1Q= +github.com/ONSdigital/graphson v0.0.0-20190718134034-c13ceacd109d/go.mod h1:zQ+8pTnCLGuy4eUek81pWUxZo4/f71ri3VYz97Wby+4= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/pool.go b/vendor/github.com/ONSdigital/gremgo-neptune/pool.go index 74073257..41807159 100644 --- a/vendor/github.com/ONSdigital/gremgo-neptune/pool.go +++ b/vendor/github.com/ONSdigital/gremgo-neptune/pool.go @@ -7,7 +7,7 @@ import ( "sync" "time" - "github.com/gedge/graphson" + "github.com/ONSdigital/graphson" "github.com/pkg/errors" ) @@ -382,7 +382,7 @@ func (p *Pool) AddVertexCtx(ctx context.Context, label string, i interface{}, bi } // Get -func (p *Pool) Get(query string, bindings, rebindings map[string]string) (resp interface{}, err error) { +func (p *Pool) Get(query string, bindings, rebindings map[string]string) (resp []graphson.Vertex, err error) { var pc *conn if pc, err = p.conn(); err != nil { return resp, errors.Wrap(err, "Failed p.conn") @@ -392,7 +392,7 @@ func (p *Pool) Get(query string, bindings, rebindings map[string]string) (resp i } // GetCtx -func (p *Pool) GetCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (resp interface{}, err error) { +func (p *Pool) GetCtx(ctx context.Context, query string, bindings, rebindings 
map[string]string) (resp []graphson.Vertex, err error) { var pc *conn if pc, err = p.connCtx(ctx); err != nil { return resp, errors.Wrap(err, "GetCtx: Failed p.connCtx") @@ -401,6 +401,17 @@ func (p *Pool) GetCtx(ctx context.Context, query string, bindings, rebindings ma return pc.Client.GetCtx(ctx, query, bindings, rebindings) } +// OpenStreamCursor initiates a query on the database, returning a stream to iterate over the results +func (p *Pool) OpenStreamCursor(ctx context.Context, query string, bindings, rebindings map[string]string) (stream *Stream, err error) { + var pc *conn + if pc, err = p.connCtx(ctx); err != nil { + err = errors.Wrap(err, "OpenStreamCursor: Failed p.connCtx") + return + } + defer p.putConn(pc, err) + return pc.Client.OpenStreamCursor(ctx, query, bindings, rebindings) +} + // OpenCursorCtx initiates a query on the database, returning a cursor to iterate over the results func (p *Pool) OpenCursorCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (cursor *Cursor, err error) { var pc *conn diff --git a/vendor/vendor.json b/vendor/vendor.json index cc622fd2..dcd62d84 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -23,86 +23,86 @@ { "checksumSHA1": "4hOuaa2bRo+okR3xsWFCTeo1NNc=", "path": "github.com/ONSdigital/dp-graph/config", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { "checksumSHA1": "Qg+JFeVzRnx1l8wM3ZE04gLExcs=", "path": "github.com/ONSdigital/dp-graph/graph", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { "checksumSHA1": "263RXdCaG1tXdn57RTXlzP1TYS8=", "path": "github.com/ONSdigital/dp-graph/graph/driver", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - 
"revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { "checksumSHA1": "PyMyKOBc90I+59WEPLD6Aovpv/4=", "path": "github.com/ONSdigital/dp-graph/mock", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { "checksumSHA1": "rlvdEFCCXvGZW0R5HBM0FEEhN38=", "path": "github.com/ONSdigital/dp-graph/neo4j", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { "checksumSHA1": "u0TDiIBM7IO/NZDXOvEwm3xbqvw=", "path": "github.com/ONSdigital/dp-graph/neo4j/mapper", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { "checksumSHA1": "fWNLT90j5B/ygFLfYpUB+TMoMaA=", "path": "github.com/ONSdigital/dp-graph/neo4j/neo4jdriver", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { "checksumSHA1": "QV5LGj4rOKWT5tvkZdzixj4D2OM=", "path": "github.com/ONSdigital/dp-graph/neo4j/query", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { - "checksumSHA1": "CVWm0P5TBbtKwNY6VZ4+TPvfcmw=", + "checksumSHA1": "Ww/x2y8TYmMSyoXW+YQ8OpXw0DU=", "path": "github.com/ONSdigital/dp-graph/neptune", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": 
"573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { - "checksumSHA1": "Z5TQx5pNn90l8zjeSbsSgc9hqP8=", + "checksumSHA1": "c8Q5ZPmuDWCGuJWUYMEj3gWNx2c=", "path": "github.com/ONSdigital/dp-graph/neptune/driver", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { - "checksumSHA1": "WdzbL61yx299fxN4CYuMx/rJd5w=", + "checksumSHA1": "hDxHE2IdkTwq79uP85i5nnO/Exc=", "path": "github.com/ONSdigital/dp-graph/neptune/internal", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { - "checksumSHA1": "zLyHRijgYcow+/o4TELraqBrhiU=", + "checksumSHA1": "3xojQzvkGmJaZenf+/irFriKoNk=", "path": "github.com/ONSdigital/dp-graph/neptune/query", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { "checksumSHA1": "DRohWZAkyrLLQ5B5yq/bngMZbgM=", "path": "github.com/ONSdigital/dp-graph/observation", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { "checksumSHA1": "ZVHJ3XHyBXaEFq1DN04u4+z6nP4=", "path": "github.com/ONSdigital/dp-graph/observation/observationtest", - "revision": "2d25f6c0a2b8eabd83fa292b374c305ad3ec8075", - "revisionTime": "2019-07-11T10:42:33Z" + "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", + "revisionTime": "2019-08-02T16:47:15Z" }, { "checksumSHA1": "sX7Krcb3Xi+QkzybgFfBj6hJ81A=", @@ -249,10 +249,16 @@ "revisionTime": "2019-02-28T15:33:39Z" }, { - "checksumSHA1": 
"xgjhvEt1uFeFa0DlYJQHvb/b0uk=", + "checksumSHA1": "gNgCahX+DGC9mQ7+i7yw0QCCgmA=", + "path": "github.com/ONSdigital/graphson", + "revision": "c13ceacd109d16904e93b8cf3fa8d7fc8569aa8f", + "revisionTime": "2019-07-18T13:40:34Z" + }, + { + "checksumSHA1": "gssBNtoM4Aw4vs4psO1vYu3HEhw=", "path": "github.com/ONSdigital/gremgo-neptune", - "revision": "8103e7ca8a44d85458fc10506f35bea78ecba754", - "revisionTime": "2019-07-12T13:38:19Z" + "revision": "d65314667aa09cfea5be4477a2b68f29d14a3ddb", + "revisionTime": "2019-08-01T15:10:53Z" }, { "checksumSHA1": "+Jp0tVXfQ1TM8T+oun82oJtME5U=", From 5bf21dafe285b8ae7b12541271be29df9885a785 Mon Sep 17 00:00:00 2001 From: Eleanor Deal Date: Tue, 6 Aug 2019 17:27:44 +0100 Subject: [PATCH 04/15] Update vendor for complete dp-graph neptune functionality --- Makefile | 5 +- mocks/observation_store.go | 9 +- .../ONSdigital/dp-filter/LICENSE.md | 9 - .../dp-filter/observation/filter.go | 52 -- .../dp-filter/observation/reader.go | 71 -- .../dp-filter/observation/row_reader.go | 88 -- .../ONSdigital/dp-filter/observation/store.go | 105 --- .../ONSdigital/dp-graph/neptune/dimension.go | 28 +- .../ONSdigital/dp-graph/neptune/hierarchy.go | 3 +- .../ONSdigital/dp-graph/neptune/instance.go | 127 ++- .../dp-graph/neptune/internal/pool.go | 3 +- .../ONSdigital/dp-graph/neptune/neptune.go | 3 +- .../dp-graph/neptune/observation.go | 29 +- .../dp-graph/neptune/query/query.go | 19 +- .../ONSdigital/gremgo-neptune/client.go | 4 +- .../ONSdigital/gremgo-neptune/response.go | 44 +- .../github.com/gedge/graphson/deserialize.go | 246 ----- vendor/github.com/gedge/graphson/types.go | 153 ---- vendor/github.com/gedge/graphson/utils.go | 238 ----- .../gedge/graphson/validation_utils.go | 94 -- .../golang-neo4j-bolt-driver/LICENSE | 21 - .../golang-neo4j-bolt-driver/README.md | 245 ----- .../golang-neo4j-bolt-driver/conn.go | 849 ------------------ .../golang-neo4j-bolt-driver/doc.go | 95 -- 
.../golang-neo4j-bolt-driver/driver.go | 186 ---- .../encoding/decoder.go | 529 ----------- .../golang-neo4j-bolt-driver/encoding/doc.go | 2 - .../encoding/encoder.go | 468 ---------- .../golang-neo4j-bolt-driver/encoding/util.go | 62 -- .../golang-neo4j-bolt-driver/errors/doc.go | 2 - .../golang-neo4j-bolt-driver/errors/errors.go | 80 -- .../golang-neo4j-bolt-driver/log/doc.go | 6 - .../golang-neo4j-bolt-driver/log/log.go | 112 --- .../golang-neo4j-bolt-driver/recorder.go | 291 ------ .../golang-neo4j-bolt-driver/result.go | 69 -- .../golang-neo4j-bolt-driver/rows.go | 289 ------ .../golang-neo4j-bolt-driver/stmt.go | 243 ----- .../structures/doc.go | 2 - .../structures/graph/doc.go | 2 - .../structures/graph/node.go | 27 - .../structures/graph/path.go | 35 - .../structures/graph/relationship.go | 25 - .../structures/graph/unbound_relationship.go | 23 - .../structures/messages/ack_failure.go | 24 - .../structures/messages/discard_all.go | 24 - .../structures/messages/doc.go | 2 - .../structures/messages/failure.go | 36 - .../structures/messages/ignored.go | 24 - .../structures/messages/init.go | 43 - .../structures/messages/pull_all.go | 24 - .../structures/messages/record.go | 28 - .../structures/messages/reset.go | 24 - .../structures/messages/run.go | 30 - .../structures/messages/success.go | 28 - .../structures/structures.go | 7 - .../golang-neo4j-bolt-driver/tx.go | 96 -- .../golang-neo4j-bolt-driver/util.go | 52 -- vendor/vendor.json | 122 +-- 58 files changed, 250 insertions(+), 5307 deletions(-) delete mode 100644 vendor/github.com/ONSdigital/dp-filter/LICENSE.md delete mode 100644 vendor/github.com/ONSdigital/dp-filter/observation/filter.go delete mode 100644 vendor/github.com/ONSdigital/dp-filter/observation/reader.go delete mode 100644 vendor/github.com/ONSdigital/dp-filter/observation/row_reader.go delete mode 100644 vendor/github.com/ONSdigital/dp-filter/observation/store.go delete mode 100644 
vendor/github.com/gedge/graphson/deserialize.go delete mode 100644 vendor/github.com/gedge/graphson/types.go delete mode 100644 vendor/github.com/gedge/graphson/utils.go delete mode 100644 vendor/github.com/gedge/graphson/validation_utils.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/LICENSE delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/README.md delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/conn.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/doc.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/driver.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/decoder.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/doc.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/encoder.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/util.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/errors/doc.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/errors/errors.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/log/doc.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/log/log.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/recorder.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/result.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/rows.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/stmt.go delete mode 100644 
vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/doc.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/doc.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/node.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/path.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/relationship.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/unbound_relationship.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/ack_failure.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/discard_all.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/doc.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/failure.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/ignored.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/init.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/pull_all.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/record.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/reset.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/run.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/success.go delete mode 100644 
vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/structures.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/tx.go delete mode 100644 vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/util.go diff --git a/Makefile b/Makefile index 51f98f8d..f13961bd 100644 --- a/Makefile +++ b/Makefile @@ -7,13 +7,16 @@ BIN_DIR?=. export GOOS?=$(shell go env GOOS) export GOARCH?=$(shell go env GOARCH) +export GRAPH_DRIVER_TYPE?="neptune" +export GRAPH_ADDR?="ws://localhost:8182/gremlin" + export ENABLE_PRIVATE_ENDPOINTS?=true build: @mkdir -p $(BUILD_ARCH)/$(BIN_DIR) go build -o $(BUILD_ARCH)/$(BIN_DIR)/dp-dataset-api main.go debug: - GRAPH_DRIVER_TYPE="neptune" GRAPH_ADDR="ws://localhost:8182/gremlin" HUMAN_LOG=1 go run main.go + HUMAN_LOG=1 go run main.go acceptance-publishing: build ENABLE_PRIVATE_ENDPOINTS=true MONGODB_DATABASE=test HUMAN_LOG=1 go run main.go acceptance-web: build diff --git a/mocks/observation_store.go b/mocks/observation_store.go index aaaf486e..970de597 100755 --- a/mocks/observation_store.go +++ b/mocks/observation_store.go @@ -4,8 +4,9 @@ package mocks import ( - "github.com/ONSdigital/dp-filter/observation" "sync" + + "github.com/ONSdigital/dp-graph/observation" ) var ( @@ -18,7 +19,7 @@ var ( // // // make and configure a mocked ObservationStore // mockedObservationStore := &ObservationStoreMock{ -// GetCSVRowsFunc: func(filter *observation.Filter, limit *int) (observation.CSVRowReader, error) { +// GetCSVRowsFunc: func(filter *observation.Filter, limit *int) (observation.StreamRowReader, error) { // panic("TODO: mock out the GetCSVRows method") // }, // } @@ -29,7 +30,7 @@ var ( // } type ObservationStoreMock struct { // GetCSVRowsFunc mocks the GetCSVRows method. 
- GetCSVRowsFunc func(filter *observation.Filter, limit *int) (observation.CSVRowReader, error) + GetCSVRowsFunc func(filter *observation.Filter, limit *int) (observation.StreamRowReader, error) // calls tracks calls to the methods. calls struct { @@ -44,7 +45,7 @@ type ObservationStoreMock struct { } // GetCSVRows calls GetCSVRowsFunc. -func (mock *ObservationStoreMock) GetCSVRows(filter *observation.Filter, limit *int) (observation.CSVRowReader, error) { +func (mock *ObservationStoreMock) GetCSVRows(filter *observation.Filter, limit *int) (observation.StreamRowReader, error) { if mock.GetCSVRowsFunc == nil { panic("ObservationStoreMock.GetCSVRowsFunc: method is nil but ObservationStore.GetCSVRows was just called") } diff --git a/vendor/github.com/ONSdigital/dp-filter/LICENSE.md b/vendor/github.com/ONSdigital/dp-filter/LICENSE.md deleted file mode 100644 index 2a9b0e35..00000000 --- a/vendor/github.com/ONSdigital/dp-filter/LICENSE.md +++ /dev/null @@ -1,9 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016-2017 Office for National Statistics - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/ONSdigital/dp-filter/observation/filter.go b/vendor/github.com/ONSdigital/dp-filter/observation/filter.go deleted file mode 100644 index 82029f94..00000000 --- a/vendor/github.com/ONSdigital/dp-filter/observation/filter.go +++ /dev/null @@ -1,52 +0,0 @@ -package observation - -// Boolean indicators for publish flag -var ( - Published = true - Unpublished = false -) - -// Filter represents a structure for a filter job -type Filter struct { - FilterID string `json:"filter_id,omitempty"` - InstanceID string `json:"instance_id"` - DimensionFilters []*DimensionFilter `json:"dimensions,omitempty"` - Published *bool `json:"published,omitempty"` - Downloads *Downloads `json:"downloads,omitempty"` -} - -// DimensionFilter represents an object containing a list of dimension values and the dimension name -type DimensionFilter struct { - Name string `json:"name,omitempty"` - Options []string `json:"options,omitempty"` -} - -// Downloads represent a list of download types -type Downloads struct { - CSV *DownloadItem `json:"csv,omitempty"` - XLS *DownloadItem `json:"xls,omitempty"` -} - -// DownloadItem represents an object containing download details -type DownloadItem struct { - HRef string `json:"href,omitempty"` - Private string `json:"private,omitempty"` - Public string `json:"public,omitempty"` - Size string `json:"size,omitempty"` -} - -// IsEmpty return true if DimensionFilters is nil, empty or contains only empty values -func (f Filter) IsEmpty() bool { - if len(f.DimensionFilters) == 0 { - return true - } - - for _, o := range f.DimensionFilters { - if o.Name != "" && len(o.Options) > 0 { - // return at the first non empty option - return false - } - } - - 
return true -} diff --git a/vendor/github.com/ONSdigital/dp-filter/observation/reader.go b/vendor/github.com/ONSdigital/dp-filter/observation/reader.go deleted file mode 100644 index ddcec05f..00000000 --- a/vendor/github.com/ONSdigital/dp-filter/observation/reader.go +++ /dev/null @@ -1,71 +0,0 @@ -package observation - -import "io" - -// Check that the reader conforms to the io.reader interface. -var _ io.Reader = (*Reader)(nil) - -// Reader is an io.Reader implementation that wraps a csvRowReader -type Reader struct { - csvRowReader CSVRowReader - buffer []byte // buffer a portion of the current line - eof bool // are we at the end of the csv rows? - totalBytesRead int64 // how many bytes in total have been read? - obsCount int32 -} - -// NewReader returns a new io.Reader for the given csvRowReader. -func NewReader(csvRowReader CSVRowReader) *Reader { - return &Reader{ - csvRowReader: csvRowReader, - } -} - -// Read bytes from the underlying csvRowReader -func (reader *Reader) Read(p []byte) (n int, err error) { - - // check if the next line needs to be read. - if reader.buffer == nil || len(reader.buffer) == 0 { - csvRow, err := reader.csvRowReader.Read() - if err == io.EOF { - reader.eof = true - } else if err != nil { - return 0, err - } - - reader.buffer = []byte(csvRow) - reader.obsCount++ - } - - // copy into the given byte array. - copied := copy(p, reader.buffer) - reader.totalBytesRead += int64(copied) - - // if the line is bigger than the array, slice the line to account for bytes read - if len(reader.buffer) > len(p) { - reader.buffer = reader.buffer[copied:] - } else { // the line is smaller than the array - clear the current line as it has all been read. - reader.buffer = nil - - if reader.eof { - return copied, io.EOF - } - } - - return copied, nil -} - -// Close the reader. 
-func (reader *Reader) Close() (err error) { - return reader.csvRowReader.Close() -} - -// TotalBytesRead returns the total number of bytes read by this reader. -func (reader *Reader) TotalBytesRead() int64 { - return reader.totalBytesRead -} - -// ObservationsCount returns the total number of bytes read by this reader. -func (reader *Reader) ObservationsCount() int32 { - return reader.obsCount -} diff --git a/vendor/github.com/ONSdigital/dp-filter/observation/row_reader.go b/vendor/github.com/ONSdigital/dp-filter/observation/row_reader.go deleted file mode 100644 index e2085cbc..00000000 --- a/vendor/github.com/ONSdigital/dp-filter/observation/row_reader.go +++ /dev/null @@ -1,88 +0,0 @@ -package observation - -import ( - "io" - - bolt "github.com/johnnadratowski/golang-neo4j-bolt-driver" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" -) - -//go:generate moq -out observationtest/bolt_rows.go -pkg observationtest . BoltRows -//go:generate moq -out observationtest/row_reader.go -pkg observationtest . CSVRowReader -//go:generate moq -out observationtest/db_connection.go -pkg observationtest . DBConnection - -// BoltRows provides an interface to each row of results returned from the database. -type BoltRows bolt.Rows - -// CSVRowReader provides a reader of individual rows (lines) of a CSV file. -type CSVRowReader interface { - Read() (string, error) - Close() error -} - -// DBConnection provides a method to close the connection once all the rows have been read -type DBConnection interface { - Close() error -} - -// BoltRowReader translates Neo4j rows to CSV rows. -type BoltRowReader struct { - rows BoltRows - connection DBConnection - rowsRead int -} - -// NewBoltRowReader returns a new reader instace for the given bolt rows. 
-func NewBoltRowReader(rows BoltRows, connection DBConnection) *BoltRowReader { - return &BoltRowReader{ - rows: rows, - connection: connection, - } -} - -// ErrNoDataReturned is returned if a Neo4j row has no data. -var ErrNoDataReturned = errors.New("no data returned in this row") - -// ErrUnrecognisedType is returned if a Neo4j row does not have the expected string value. -var ErrUnrecognisedType = errors.New("the value returned was not a string") - -// ErrNoInstanceFound is returned if no instance exists in neo4j -var ErrNoInstanceFound = errors.New("no instance found in datastore") - -// ErrNoResultsFound is returned if the selected filter options produce no results -var ErrNoResultsFound = errors.New("the filter options created no results") - -// Read the next row, or return io.EOF -func (reader *BoltRowReader) Read() (string, error) { - data, _, err := reader.rows.NextNeo() - if err != nil { - if err == io.EOF { - if reader.rowsRead == 0 { - return "", ErrNoInstanceFound - } else if reader.rowsRead == 1 { - return "", ErrNoResultsFound - } - } - return "", err - } - - if len(data) < 1 { - return "", ErrNoDataReturned - } - - if csvRow, ok := data[0].(string); ok { - reader.rowsRead++ - return csvRow + "\n", nil - } - - return "", ErrUnrecognisedType -} - -// Close the reader and the connection (For pooled connections this will release it back into the pool) -func (reader *BoltRowReader) Close() error { - err := reader.rows.Close() - if err != nil { - return err - } - return reader.connection.Close() -} diff --git a/vendor/github.com/ONSdigital/dp-filter/observation/store.go b/vendor/github.com/ONSdigital/dp-filter/observation/store.go deleted file mode 100644 index baf2d07e..00000000 --- a/vendor/github.com/ONSdigital/dp-filter/observation/store.go +++ /dev/null @@ -1,105 +0,0 @@ -package observation - -import ( - "fmt" - "strconv" - "strings" - - "github.com/ONSdigital/go-ns/log" - bolt 
"github.com/johnnadratowski/golang-neo4j-bolt-driver" -) - -//go:generate moq -out observationtest/db_pool.go -pkg observationtest . DBPool - -// Store represents storage for observation data. -type Store struct { - pool DBPool -} - -// DBPool provides a pool of database connections -type DBPool interface { - OpenPool() (bolt.Conn, error) -} - -// NewStore returns a new store instace using the given DB connection. -func NewStore(pool DBPool) *Store { - return &Store{ - pool: pool, - } -} - -// GetCSVRows returns a reader allowing individual CSV rows to be read. Rows returned -// can be limited, to stop this pass in nil. If filter.DimensionFilters is nil, empty or contains only empty values then -// a CSVRowReader for the entire dataset will be returned. -func (store *Store) GetCSVRows(filter *Filter, limit *int) (CSVRowReader, error) { - - headerRowQuery := fmt.Sprintf("MATCH (i:`_%s_Instance`) RETURN i.header as row", filter.InstanceID) - - unionQuery := headerRowQuery + " UNION ALL " + createObservationQuery(filter) - - if limit != nil { - limitAsString := strconv.Itoa(*limit) - unionQuery += " LIMIT " + limitAsString - } - - log.Info("neo4j query", log.Data{ - "filterID": filter.FilterID, - "instanceID": filter.InstanceID, - "query": unionQuery, - }) - conn, err := store.pool.OpenPool() - if err != nil { - return nil, err - } - - rows, err := conn.QueryNeo(unionQuery, nil) - if err != nil { - // Before returning the error "close" the open connection to release it back into the pool. 
- conn.Close() - return nil, err - } - // The connection can only be closed once the results have been read, so the row reader is responsible for - // releasing the connection back into the pool - return NewBoltRowReader(rows, conn), nil -} - -func createObservationQuery(filter *Filter) string { - if filter.IsEmpty() { - // if no dimension filter are specified than match all observations - log.Info("no dimension filters supplied, generating entire dataset query", log.Data{ - "filterID": filter.FilterID, - "instanceID": filter.InstanceID, - }) - return fmt.Sprintf("MATCH(o: `_%s_observation`) return o.value as row", filter.InstanceID) - } - - matchDimensions := "MATCH " - where := " WHERE " - - count := 0 - for _, dimension := range filter.DimensionFilters { - // If the dimension options is empty then don't bother specifying in the query as this will exclude all matches. - if len(dimension.Options) > 0 { - if count > 0 { - matchDimensions += ", " - where += " AND " - } - - matchDimensions += fmt.Sprintf("(o)-[:isValueOf]->(`%s`:`_%s_%s`)", dimension.Name, filter.InstanceID, dimension.Name) - where += createOptionList(dimension.Name, dimension.Options) - count++ - } - } - - return matchDimensions + where + " RETURN o.value AS row" -} - -func createOptionList(name string, opts []string) string { - var q []string - - for _, o := range opts { - q = append(q, fmt.Sprintf("`%s`.value='%s'", name, o)) - } - - return fmt.Sprintf("(%s)", strings.Join(q, " OR ")) -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go b/vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go index c76ef473..0cbe5b8f 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go @@ -2,10 +2,34 @@ package neptune import ( "context" + "fmt" "github.com/ONSdigital/dp-dimension-importer/model" + "github.com/ONSdigital/dp-graph/neptune/query" ) -func (n 
*NeptuneDB) InsertDimension(ctx context.Context, cache map[string]string, i *model.Instance, d *model.Dimension) (*model.Dimension, error) { - return nil, nil +// InsertDimension node to neptune and create relationships to the instance node. +// Where nodes and relationships already exist, ensure they are upserted. +func (n *NeptuneDB) InsertDimension(ctx context.Context, uniqueDimensions map[string]string, i *model.Instance, d *model.Dimension) (*model.Dimension, error) { + if err := i.Validate(); err != nil { + return nil, err + } + if err := d.Validate(); err != nil { + return nil, err + } + + dimensionLabel := fmt.Sprintf("_%s_%s", i.InstanceID, d.DimensionID) + + res, err := n.getVertex(fmt.Sprintf(query.CreateDimensionToInstanceRelationship, i.InstanceID, d.DimensionID, d.Option, i.InstanceID, d.DimensionID, d.Option, i.InstanceID)) + if err != nil { + return nil, err + } + + d.NodeID = res.GetID() + + if _, ok := uniqueDimensions[dimensionLabel]; !ok { + uniqueDimensions[dimensionLabel] = dimensionLabel + i.AddDimension(d) + } + return d, nil } diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go b/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go index 394b184c..aebaa482 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go @@ -9,10 +9,11 @@ import ( "github.com/ONSdigital/dp-hierarchy-api/models" "github.com/ONSdigital/go-ns/log" "github.com/ONSdigital/graphson" + "github.com/pkg/errors" ) func (n *NeptuneDB) CreateInstanceHierarchyConstraints(ctx context.Context, attempt int, instanceID, dimensionName string) error { - return nil + return errors.New("method not supported: CreateInstanceHierarchyConstraints") } func (n *NeptuneDB) CloneNodes(ctx context.Context, attempt int, instanceID, codeListID, dimensionName string) (err error) { diff --git 
a/vendor/github.com/ONSdigital/dp-graph/neptune/instance.go b/vendor/github.com/ONSdigital/dp-graph/neptune/instance.go index f0da37ad..b10472dc 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/instance.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/instance.go @@ -2,38 +2,161 @@ package neptune import ( "context" + "fmt" + "strings" "github.com/ONSdigital/dp-dimension-importer/model" + "github.com/ONSdigital/dp-graph/neptune/query" + "github.com/ONSdigital/go-ns/log" + gremgo "github.com/ONSdigital/gremgo-neptune" + "github.com/pkg/errors" ) +const codeListNotFoundFmt = "VertexStep(OUT,[usedBy],vertex), HasStep([~label.eq(_code_list_%s)" + +// CountInsertedObservations returns the current number of observations relating to a given instance func (n *NeptuneDB) CountInsertedObservations(ctx context.Context, instanceID string) (count int64, err error) { - return 0, nil + return n.getNumber(fmt.Sprintf(query.CountObservations, instanceID)) } +// AddVersionDetailsToInstance updates an instance node to contain details of which +// dataset, edition and version the instance will also be known by func (n *NeptuneDB) AddVersionDetailsToInstance(ctx context.Context, instanceID string, datasetID string, edition string, version int) error { + data := log.Data{ + "instance_id": instanceID, + "dataset_id": datasetID, + "edition": edition, + "version": version, + } + + q := fmt.Sprintf(query.AddVersionDetailsToInstance, instanceID, datasetID, edition, version) + + if _, err := n.exec(q); err != nil { + log.ErrorC("neptune exec failed on AddVersionDetailsToInstance", err, data) + return err + } return nil } +// SetInstanceIsPublished sets a flag on an instance node to indicate the published state func (n *NeptuneDB) SetInstanceIsPublished(ctx context.Context, instanceID string) error { + data := log.Data{ + "instance_id": instanceID, + } + + q := fmt.Sprintf(query.SetInstanceIsPublished, 
instanceID) + + if _, err := n.exec(q); err != nil { + log.ErrorC("neptune exec failed on SetInstanceIsPublished", err, data) + return err + } return nil } +// CreateInstanceConstraint is not needed for the neptune implementation, as constraints are +// not a neptune construct func (n *NeptuneDB) CreateInstanceConstraint(ctx context.Context, i *model.Instance) error { - return nil + return errors.New("method not supported: CreateInstanceConstraint") } +// CreateInstance will check if an instance node already exists and create one from +// the provided details if one does not exist func (n *NeptuneDB) CreateInstance(ctx context.Context, i *model.Instance) error { + if err := i.Validate(); err != nil { + return err + } + + data := log.Data{ + "instance_id": i.InstanceID, + } + + exists, err := n.InstanceExists(ctx, i) + if err != nil { + return err + } + + if exists { + log.Info("instance already exists in neptune", data) + return nil + } + + create := fmt.Sprintf(query.CreateInstance, i.InstanceID, strings.Join(i.CSVHeader, ",")) + if _, err := n.exec(create); err != nil { + log.ErrorC("neptune exec failed on CreateInstance", err, data) + return err + } return nil } +// AddDimensions list to the specified instance node func (n *NeptuneDB) AddDimensions(ctx context.Context, i *model.Instance) error { + if err := i.Validate(); err != nil { + return err + } + + data := log.Data{ + "instance_id": i.InstanceID, + } + + q := fmt.Sprintf(query.AddInstanceDimensionsPart, i.InstanceID) + for _, d := range i.Dimensions { + q += fmt.Sprintf(query.AddInstanceDimensionsPropertyPart, d.(string)) + } + + if _, err := n.exec(q); err != nil { + log.ErrorC("neptune exec failed on AddDimensions", err, data) + return err + } + return nil } +// CreateCodeRelationship links an instance to a code for the given dimension option func (n *NeptuneDB) CreateCodeRelationship(ctx context.Context, i *model.Instance, codeListID, code string) error { + if err := i.Validate(); err != nil { + return 
err + } + + if len(code) == 0 { + return errors.New("error creating relationship from instance to code: code is required but was empty") + } + + data := log.Data{ + "instance_id": i.InstanceID, + "code_list": codeListID, + "code": code, + } + + createRelationships := fmt.Sprintf(query.CreateInstanceToCodeRelationship, i.InstanceID, code, codeListID) + if res, err := n.exec(createRelationships); err != nil { + if len(res) > 0 && res[0].Status.Code == gremgo.StatusScriptEvaluationError && + strings.Contains(res[0].Status.Message, fmt.Sprintf(codeListNotFoundFmt, codeListID)) { + + return errors.Wrapf(err, "error creating relationship from instance to code: code or code list not found", data) + } + log.ErrorC("neptune exec failed on CreateCodeRelationship", err, data) + return err + } + return nil } +// InstanceExists returns true if an instance already exists with the provided id func (n *NeptuneDB) InstanceExists(ctx context.Context, i *model.Instance) (bool, error) { + data := log.Data{ + "instance_id": i.InstanceID, + } + + exists := fmt.Sprintf(query.CheckInstance, i.InstanceID) + count, err := n.getNumber(exists) + if err != nil { + log.ErrorC("neptune getNumber failed to check if instance exists", err, data) + return false, err + } + + if count == 0 { + return false, nil + } + return true, nil } diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go index ec532750..17bc839b 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go @@ -5,11 +5,10 @@ package internal import ( "context" - "sync" - "github.com/ONSdigital/dp-graph/neptune/driver" "github.com/ONSdigital/graphson" "github.com/ONSdigital/gremgo-neptune" + "sync" ) var ( diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go 
b/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go index 453cce87..d02c084b 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go @@ -12,6 +12,7 @@ import ( "github.com/ONSdigital/dp-graph/neptune/driver" "github.com/ONSdigital/go-ns/log" "github.com/ONSdigital/graphson" + gremgo "github.com/ONSdigital/gremgo-neptune" ) type NeptuneDB struct { @@ -155,7 +156,7 @@ func (n *NeptuneDB) getEdges(gremStmt string) (edges []graphson.Edge, err error) return } -func (n *NeptuneDB) exec(gremStmt string) (res interface{}, err error) { +func (n *NeptuneDB) exec(gremStmt string) (res []gremgo.Response, err error) { logData := log.Data{"fn": "n.exec", "statement": gremStmt, "attempt": 1} for attempt := 1; attempt < n.maxAttempts; attempt++ { diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go b/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go index 93c28dd0..ffef0caf 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go @@ -17,6 +17,11 @@ import ( // ErrInvalidFilter is returned if the provided filter is nil. 
var ErrInvalidFilter = errors.New("nil filter cannot be processed") +// TODO: this global state is only used for metrics in InsertObservationBatch, +// not used in actual code flow, but should be revisited before production use +var batchCount = 0 +var totalTime time.Time + func (n *NeptuneDB) StreamCSVRows(ctx context.Context, filter *observation.Filter, limit *int) (observation.StreamRowReader, error) { if filter == nil { return nil, ErrInvalidFilter @@ -40,6 +45,7 @@ func buildObservationsQuery(f *observation.Filter) string { } q := fmt.Sprintf(query.GetObservationsPart, f.InstanceID) + var selectOpts []string for _, dim := range f.DimensionFilters { if len(dim.Options) == 0 { @@ -50,34 +56,29 @@ func buildObservationsQuery(f *observation.Filter) string { dim.Options[i] = fmt.Sprintf("'%s'", opt) } - q += fmt.Sprintf(query.GetObservationDimensionPart, f.InstanceID, dim.Name, strings.Join(dim.Options, ",")) + "," + selectOpts = append(selectOpts, fmt.Sprintf(query.GetObservationDimensionPart, f.InstanceID, dim.Name, strings.Join(dim.Options, ","))) } - //remove trailing comma and close match statement - q = strings.Trim(q, ",") + //comma separate dimension option selections and close match statement + q += strings.Join(selectOpts, ",") q += ")" return q } -// TODO: this global state is only used for metrics, not actual code flow, -// but should be revisited before production use -var batchCount = 0 -var totalTime time.Time - -func (n *NeptuneDB) InsertObservationBatch(ctx context.Context, attempt int, instanceID string, observations []*models.Observation, dimensionIDs map[string]string) error { +func (n *NeptuneDB) InsertObservationBatch(ctx context.Context, attempt int, instanceID string, observations []*models.Observation, dimensionNodeIDs map[string]string) error { if len(observations) == 0 { - fmt.Println("range should be empty") + log.Info("no observations in batch", log.Data{"instance_ID": instanceID}) return nil } - c := batchCount + bID := batchCount 
batchCount++ batchStart := time.Now() if totalTime.IsZero() { totalTime = batchStart } else { - log.Info("opening batch", log.Data{"size": len(observations), "batchID": c}) + log.Info("opening batch", log.Data{"size": len(observations), "batchID": bID}) } var create string @@ -89,7 +90,7 @@ func (n *NeptuneDB) InsertObservationBatch(ctx context.Context, attempt int, ins dimensionName := strings.ToLower(d.DimensionName) dimensionLookup := instanceID + "_" + dimensionName + "_" + d.Name - nodeID, ok := dimensionIDs[dimensionLookup] + nodeID, ok := dimensionNodeIDs[dimensionLookup] if !ok { return fmt.Errorf("no nodeID [%s] found in dimension map", dimensionLookup) } @@ -105,6 +106,6 @@ func (n *NeptuneDB) InsertObservationBatch(ctx context.Context, attempt int, ins return err } - log.Info("batch complete", log.Data{"batchID": c, "elapsed": time.Since(totalTime), "batchTime": time.Since(batchStart)}) + log.Info("batch complete", log.Data{"batchID": bID, "elapsed": time.Since(totalTime), "batchTime": time.Since(batchStart)}) return nil } diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go b/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go index 259adba9..789ffe1e 100644 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go +++ b/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go @@ -76,17 +76,22 @@ const ( // instance - import process CreateInstance = "g.addV('_%s_Instance').property(single,'header','%s')" - CountInstance = "g.V().hasLabel('_%s_Instance').count()" - AddInstanceDimensions = "g.V().hasLabel('_%s_Instance').property('dimensions',%s)" + CheckInstance = "g.V().hasLabel('_%s_Instance').count()" CreateInstanceToCodeRelationship = "g.V().hasLabel('_%s_Instance').as('i').addE('inDataset').from(" + - "V().hasLabel('_code').has('value','%s').where(out('usedBy').has(label,'_code_list_%s'))" + + 
"V().hasLabel('_code').has('value','%s').where(out('usedBy').hasLabel('_code_list').has('listID','%s'))" + ")" - AddVersionDetailsToInstance = "g.V().hasLabel('_%s_Instance').property(single,'dataset_id','%s').property(single,'edition','%s').property(single,'version','%s')" - SetInstanceIsPublished = "g.V().hasLabel('_%s_Instance').property(single,'is_published',true)" - CountObservations = "g.V().hasLabel('_%s_observation').count()" + AddVersionDetailsToInstance = "g.V().hasLabel('_%s_Instance').property(single,'dataset_id','%s')." + + "property(single,'edition','%s').property(single,'version','%s')" + SetInstanceIsPublished = "g.V().hasLabel('_%s_Instance').property(single,'is_published',true)" + CountObservations = "g.V().hasLabel('_%s_observation').count()" + + //instance - parts + AddInstanceDimensionsPart = "g.V().hasLabel('_%s_Instance')" + AddInstanceDimensionsPropertyPart = ".property(list, 'dimensions', '%s')" // dimension - CreateDimensionToInstanceRelationship = "g.addV('_%s_%s').property('value','%s').as('d').addE('HAS_DIMENSION').from(V().hasLabel('_%s_Instance')).select('d').by(id)" + CreateDimensionToInstanceRelationship = "g.V().hasLabel('_%s_%s').has('value', '%s').fold().coalesce(unfold(), " + + "addV('_%s_%s').as('d').property('value','%s').addE('HAS_DIMENSION').from(V().hasLabel('_%s_Instance')).select('d'))" // observation DropObservationRelationships = "g.V().hasLabel('_%s_observation').has('value', '%s').bothE().drop().iterate()" diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/client.go b/vendor/github.com/ONSdigital/gremgo-neptune/client.go index 26a230cd..4e6a4505 100644 --- a/vendor/github.com/ONSdigital/gremgo-neptune/client.go +++ b/vendor/github.com/ONSdigital/gremgo-neptune/client.go @@ -198,7 +198,7 @@ func (c *Client) GetCtx(ctx context.Context, query string, bindings, rebindings } func (c *Client) deserializeResponseToVertices(resp []Response) (res []graphson.Vertex, err error) { - if 
len(resp) == 0 || resp[0].Status.Code == statusNoContent { + if len(resp) == 0 || resp[0].Status.Code == StatusNoContent { return } @@ -266,7 +266,7 @@ func (c *Client) GetEdgeCtx(ctx context.Context, query string, bindings, rebindi if err != nil { return } - if len(resp) == 0 || resp[0].Status.Code == statusNoContent { + if len(resp) == 0 || resp[0].Status.Code == StatusNoContent { return } diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/response.go b/vendor/github.com/ONSdigital/gremgo-neptune/response.go index 3e307554..620a1c82 100644 --- a/vendor/github.com/ONSdigital/gremgo-neptune/response.go +++ b/vendor/github.com/ONSdigital/gremgo-neptune/response.go @@ -9,17 +9,17 @@ import ( ) const ( - statusSuccess = 200 - statusNoContent = 204 - statusPartialContent = 206 - statusUnauthorized = 401 - statusAuthenticate = 407 - statusMalformedRequest = 498 - statusInvalidRequestArguments = 499 - statusServerError = 500 - statusScriptEvaluationError = 597 - statusServerTimeout = 598 - statusServerSerializationError = 599 + StatusSuccess = 200 + StatusNoContent = 204 + StatusPartialContent = 206 + StatusUnauthorized = 401 + StatusAuthenticate = 407 + StatusMalformedRequest = 498 + StatusInvalidRequestArguments = 499 + StatusServerError = 500 + StatusScriptEvaluationError = 597 + StatusServerTimeout = 598 + StatusServerSerializationError = 599 ) // Status struct is used to hold properties returned from requests to the gremlin server @@ -64,7 +64,7 @@ func (c *Client) saveWorkerCtx(ctx context.Context, msgChan chan []byte, errs ch func (c *Client) handleResponse(msg []byte) (err error) { var resp Response resp, err = marshalResponse(msg) - if resp.Status.Code == statusAuthenticate { //Server request authentication + if resp.Status.Code == StatusAuthenticate { //Server request authentication return c.authenticate(resp.RequestID) } c.saveResponse(resp, err) @@ -98,7 +98,7 @@ func (c *Client) saveResponse(resp Response, err error) { 
c.results.Store(resp.RequestID, newdata) // Add new data to buffer for future retrieval respNotifier, _ := c.responseNotifier.LoadOrStore(resp.RequestID, make(chan error, 1)) // err is from marshalResponse (json.Unmarshal), but is ignored when Code==statusPartialContent - if resp.Status.Code == statusPartialContent { + if resp.Status.Code == StatusPartialContent { if chunkNotifier, ok := c.chunkNotifier.Load(resp.RequestID); ok { chunkNotifier.(chan bool) <- true } @@ -207,22 +207,22 @@ func (c *Client) deleteResponse(id string) { // detectError detects any possible errors in responses from Gremlin Server and generates an error for each code func (r *Response) detectError() (err error) { switch r.Status.Code { - case statusSuccess, statusNoContent, statusPartialContent: - case statusUnauthorized: + case StatusSuccess, StatusNoContent, StatusPartialContent: + case StatusUnauthorized: err = fmt.Errorf("UNAUTHORIZED - Response Message: %s", r.Status.Message) - case statusAuthenticate: + case StatusAuthenticate: err = fmt.Errorf("AUTHENTICATE - Response Message: %s", r.Status.Message) - case statusMalformedRequest: + case StatusMalformedRequest: err = fmt.Errorf("MALFORMED REQUEST - Response Message: %s", r.Status.Message) - case statusInvalidRequestArguments: + case StatusInvalidRequestArguments: err = fmt.Errorf("INVALID REQUEST ARGUMENTS - Response Message: %s", r.Status.Message) - case statusServerError: + case StatusServerError: err = fmt.Errorf("SERVER ERROR - Response Message: %s", r.Status.Message) - case statusScriptEvaluationError: + case StatusScriptEvaluationError: err = fmt.Errorf("SCRIPT EVALUATION ERROR - Response Message: %s", r.Status.Message) - case statusServerTimeout: + case StatusServerTimeout: err = fmt.Errorf("SERVER TIMEOUT - Response Message: %s", r.Status.Message) - case statusServerSerializationError: + case StatusServerSerializationError: err = fmt.Errorf("SERVER SERIALIZATION ERROR - Response Message: %s", r.Status.Message) default: err = 
fmt.Errorf("UNKNOWN ERROR - Response Message: %s", r.Status.Message) diff --git a/vendor/github.com/gedge/graphson/deserialize.go b/vendor/github.com/gedge/graphson/deserialize.go deleted file mode 100644 index f0b96aed..00000000 --- a/vendor/github.com/gedge/graphson/deserialize.go +++ /dev/null @@ -1,246 +0,0 @@ -package graphson - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" -) - -func DeserializeVertices(rawResponse string) ([]Vertex, error) { - // TODO: empty strings for property values will cause invalid json - // make so it can handle that case - if len(rawResponse) == 0 { - return []Vertex{}, nil - } - return DeserializeVerticesFromBytes([]byte(rawResponse)) -} - -func DeserializeVerticesFromBytes(rawResponse []byte) ([]Vertex, error) { - // TODO: empty strings for property values will cause invalid json - // make so it can handle that case - var response []Vertex - if len(rawResponse) == 0 { - return response, nil - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err := dec.Decode(&response); err != nil { - return nil, err - } - return response, nil -} - -func DeserializeListOfVerticesFromBytes(rawResponse []byte) ([]Vertex, error) { - var metaResponse ListVertices - var response []Vertex - if len(rawResponse) == 0 { - return response, nil - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err := dec.Decode(&metaResponse); err != nil { - return nil, err - } - - if metaResponse.Type != "g:List" { - return response, errors.New("DeserializeListOfVerticesFromBytes: Expected `g:List` type") - } - - return metaResponse.Value, nil -} - -func DeserializeListOfEdgesFromBytes(rawResponse []byte) (Edges, error) { - var metaResponse ListEdges - var response Edges - if len(rawResponse) == 0 { - return response, nil - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - err := dec.Decode(&metaResponse) - if err != 
nil { - return nil, err - } - - if metaResponse.Type != "g:List" { - return response, errors.New("DeserializeListOfEdgesFromBytes: Expected `g:List` type") - } - - return metaResponse.Value, nil -} - -func DeserializeMapFromBytes(rawResponse []byte) (resMap map[string]interface{}, err error) { - var metaResponse GList - if len(rawResponse) == 0 { - return - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err = dec.Decode(&metaResponse); err != nil { - return nil, err - } - - if metaResponse.Type != "g:Map" { - return resMap, errors.New("DeserializeMapFromBytes: Expected `g:Map` type") - } - - return resMap, nil -} - -// DeserializePropertiesFromBytes is for converting vertex .properties() results into a map -func DeserializePropertiesFromBytes(rawResponse []byte, resMap map[string][]interface{}) (err error) { - var metaResponse GList - if len(rawResponse) == 0 { - return - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err = dec.Decode(&metaResponse); err != nil { - return - } - - if metaResponse.Type != "g:List" { - return errors.New("DeserializePropertiesFromBytes: Expected `g:List` type") - } - var props []VertexProperty - if err = json.Unmarshal(metaResponse.Value, &props); err != nil { - return - } - - for _, prop := range props { - if _, ok := resMap[prop.Value.Label]; !ok { - resMap[prop.Value.Label] = []interface{}{prop.Value.Value} - } else { - resMap[prop.Value.Label] = append(resMap[prop.Value.Label], prop.Value.Value) - } - } - - return -} - -// DeserializeStringListFromBytes get a g:List value which should be a a list of strings, return those -func DeserializeStringListFromBytes(rawResponse []byte) (vals []string, err error) { - var metaResponse GList - if len(rawResponse) == 0 { - err = errors.New("DeserializeStringListFromBytes: nothing to decode") - return - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err = 
dec.Decode(&metaResponse); err != nil { - return - } - - if metaResponse.Type != "g:List" { - err = errors.New("DeserializeStringListFromBytes: Expected `g:List` type") - return - } - - if err = json.Unmarshal(metaResponse.Value, &vals); err != nil { - return - } - return -} - -// DeserializeSingleFromBytes get a g:List value which should be a singular item, returns that item -func DeserializeSingleFromBytes(rawResponse []byte) (gV GenericValue, err error) { - var metaResponse GList - if len(rawResponse) == 0 { - err = errors.New("DeserializeSingleFromBytes: nothing to decode") - return - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err = dec.Decode(&metaResponse); err != nil { - return - } - - if metaResponse.Type != "g:List" { - err = errors.New("DeserializeSingleFromBytes: Expected `g:List` type") - return - } - - var genVals GenericValues - if genVals, err = DeserializeGenericValues(string(metaResponse.Value)); err != nil { - return - } - - if len(genVals) != 1 { - err = fmt.Errorf("DeserializeSingleFromBytes: Expected single value, got %d", len(genVals)) - return - } - - return genVals[0], nil -} - -// DeserializeNumber returns the count from the g:List'd database response -func DeserializeNumber(rawResponse []byte) (count int64, err error) { - var genVal GenericValue - if genVal, err = DeserializeSingleFromBytes(rawResponse); err != nil { - return - } - - if genVal.Type != "g:Int64" { - err = errors.New("DeserializeNumber: Expected `g:Int64` type") - return - } - count = int64(genVal.Value.(float64)) - return -} - -func DeserializeEdges(rawResponse string) (Edges, error) { - var response Edges - if rawResponse == "" { - return response, nil - } - err := json.Unmarshal([]byte(rawResponse), &response) - if err != nil { - return nil, err - } - return response, nil -} - -func DeserializeGenericValue(rawResponse string) (response GenericValue, err error) { - if len(rawResponse) == 0 { - return - } - if err = 
json.Unmarshal([]byte(rawResponse), &response); err != nil { - return - } - return -} - -func DeserializeGenericValues(rawResponse string) (GenericValues, error) { - var response GenericValues - if rawResponse == "" { - return response, nil - } - err := json.Unmarshal([]byte(rawResponse), &response) - if err != nil { - return nil, err - } - return response, nil -} - -func ConvertToCleanVertices(vertices []Vertex) []CleanVertex { - var responseVertices []CleanVertex - for _, vertex := range vertices { - responseVertices = append(responseVertices, CleanVertex{ - Id: vertex.Value.ID, - Label: vertex.Value.Label, - }) - } - return responseVertices -} - -func ConvertToCleanEdges(edges Edges) []CleanEdge { - var responseEdges []CleanEdge - for _, edge := range edges { - responseEdges = append(responseEdges, CleanEdge{ - Source: edge.Value.InV, - Target: edge.Value.OutV, - }) - } - return responseEdges -} diff --git a/vendor/github.com/gedge/graphson/types.go b/vendor/github.com/gedge/graphson/types.go deleted file mode 100644 index 028027eb..00000000 --- a/vendor/github.com/gedge/graphson/types.go +++ /dev/null @@ -1,153 +0,0 @@ -package graphson - -import "encoding/json" - -// cbi made up, not a real graphson or gremlin thing -// type GremlinResponse struct { -// V Vertices -// E Edges -// } - -type GList struct { - Type string `json:"@type"` - Value json.RawMessage `json:"@value"` -} - -// type GMap struct { -// Type string `json:"@type"` -// Value json.RawMessage `json:"@value"` -// } - -type ListVertices struct { - Type string `json:"@type"` - Value []Vertex `json:"@value"` -} -type ListEdges struct { - Type string `json:"@type"` - Value Edges `json:"@value"` -} - -// type Vertices []Vertex - -type Vertex struct { - Type string `json:"@type"` - Value VertexValue `json:"@value"` -} - -type VertexValue struct { - ID string `json:"id"` - Label string `json:"label"` - Properties map[string][]VertexProperty `json:"properties"` -} - -type 
VertexProperty struct { - Type string `json:"@type"` - Value VertexPropertyValue `json:"@value"` -} - -type EdgeProperty struct { - Type string `json:"@type"` - Value EdgePropertyValue `json:"@value"` -} - -type VertexPropertyValue struct { - ID GenericValue `json:"id"` - Label string `json:"label"` - Value interface{} `json:"value"` -} - -type EdgePropertyValue struct { - Label string `json:"key"` - // Value GenericValue `json:"value"` // this works when value is NOT a string - Value json.RawMessage `json:"value"` - // ValueStr string `json:"value"` - // Value interface{} `json:"value"` -} - -type GenericValues []GenericValue - -type GenericValue struct { - Type string `json:"@type"` - Value interface{} `json:"@value"` -} - -type Edges []Edge - -type Edge struct { - Type string `json:"@type"` - Value EdgeValue `json:"@value"` -} - -type EdgeValue struct { - ID string `json:"id"` - Label string `json:"label"` - InVLabel string `json:"inVLabel"` - OutVLabel string `json:"outVLabel"` - InV string `json:"inV"` - OutV string `json:"outV"` - Properties map[string]EdgeProperty `json:"properties"` -} - -// type CleanResponse struct { -// V []CleanVertex -// E []CleanEdge -// } - -type CleanEdge struct { - Source string `json:"source"` - Target string `json:"target"` -} - -type CleanVertex struct { - Id string `json:"id"` - Label string `json:"label"` -} - -// type MinVertex struct { -// ID string -// Label string -// Props map[string][]MinVertexProp -// } -// type MinVertexProp struct { -// // ID string -// Label string -// Value interface{} -// } - -// type UpsertVertexMap struct { -// Id string `json:""` -// Label string `json:"label"` -// } - -// type TypeID int - -// const ( -// TypeString TypeID = iota -// TypeBoolean -// TypeMap -// TypeCollection -// TypeClass -// TypeDate -// TypeDouble -// TypeFloat -// TypeInteger -// TypeLong -// TypeTimestamp -// TypeUUID -// TypeVertex -// TypeVertexProperty -// ) - -// const ( -// TypeStrDate = "g:Date" -// TypeStrDouble = 
"g:Double" -// TypeStrFloat = "g:Float" -// TypeStrInteger = "g:Int32" -// TypeStrLong = "g:Int64" -// TypeStrTimestamp = "g:Timestamp" -// TypeStrUUID = "g:UUID" -// TypeStrVertex = "g:Vertex" -// TypeStrVertexProperty = "g:VertexProperty" -// TypeStrProperty = "g:Property" -// TypeStrEdge = "g:Edge" -// ) diff --git a/vendor/github.com/gedge/graphson/utils.go b/vendor/github.com/gedge/graphson/utils.go deleted file mode 100644 index 29243048..00000000 --- a/vendor/github.com/gedge/graphson/utils.go +++ /dev/null @@ -1,238 +0,0 @@ -package graphson - -import ( - "errors" - "strings" -) - -var ( - ErrorPropertyNotFound = errors.New("property not found") - ErrorPropertyIsMeta = errors.New("meta-property found where multi-property expected") - ErrorPropertyIsMulti = errors.New("multi-property found where singleton expected") - ErrorUnexpectedPropertyType = errors.New("property value could not be cast into expected type") -) - -// GetID returns the string ID for the given vertex -func (v Vertex) GetID() string { - return v.Value.ID -} - -// GetLabels returns the []string labels for the given vertex -func (v Vertex) GetLabels() (labels []string) { - labels = append(labels, v.Value.Label) - if strings.Index(labels[0], "::") == -1 { - return - } - return strings.Split(labels[0], "::") -} - -// GetLabel returns the string label for the given vertex, or an error if >1 -func (v Vertex) GetLabel() (string, error) { - labels := v.GetLabels() - if len(labels) > 1 { - return "", errors.New("too many labels - expected one") - } - return labels[0], nil -} - -// GetMultiProperty returns the ([]string) values for the given property `key` -// will return an error if the property is not the correct type -func (v Vertex) GetMultiProperty(key string) (vals []string, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "string"); err != nil { - return - } - for _, val := range valsInterface { - vals = append(vals, 
val.(string)) - } - return -} - -// GetMultiPropertyBool returns the ([]bool) values for the given property `key` -// will return an error if the property is not the correct type -func (v Vertex) GetMultiPropertyBool(key string) (vals []bool, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "bool"); err != nil { - return - } - for _, val := range valsInterface { - vals = append(vals, val.(bool)) - } - return -} - -// GetMultiPropertyInt64 returns the ([]int64) values for the given property `key` -// will return an error if the property is not the correct type -func (v Vertex) GetMultiPropertyInt64(key string) (vals []int64, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "int64"); err != nil { - return - } - for _, val := range valsInterface { - vals = append(vals, val.(int64)) - } - return -} - -// GetMultiPropertyInt32 returns the ([]int32) values for the given property `key` -// will return an error if the property is not the correct type -func (v Vertex) GetMultiPropertyInt32(key string) (vals []int32, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "int32"); err != nil { - return - } - for _, val := range valsInterface { - vals = append(vals, val.(int32)) - } - return -} - -// getMultiPropertyAs returns the values for the given property `key` as type `wantType` -// will return an error if the property is not a set of the given `wantType` (string, bool, int64) -func (v Vertex) GetMultiPropertyAs(key, wantType string) (vals []interface{}, err error) { - var valInterface []VertexProperty - var ok bool - if valInterface, ok = v.Value.Properties[key]; !ok { - err = ErrorPropertyNotFound - return - } - for _, prop := range valInterface { - if prop.Value.Label != key { - err = ErrorPropertyIsMulti - return - } - switch wantType { - - case "string": - var val string - if val, ok = prop.Value.Value.(string); !ok { - err 
= ErrorUnexpectedPropertyType - return - } - vals = append(vals, val) - case "bool": - var val bool - if val, ok = prop.Value.Value.(bool); !ok { - err = ErrorUnexpectedPropertyType - return - } - vals = append(vals, val) - case "int32": - var typeIf, valIf interface{} - if typeIf, ok = prop.Value.Value.(map[string]interface{})["@type"]; !ok || typeIf != "g:Int32" { - return vals, ErrorUnexpectedPropertyType - } - if valIf, ok = prop.Value.Value.(map[string]interface{})["@value"]; !ok { - return vals, ErrorUnexpectedPropertyType - } - var val float64 - if val, ok = valIf.(float64); !ok { - return vals, ErrorUnexpectedPropertyType - } - vals = append(vals, int32(val)) - case "int64": - var val int64 - if val, ok = prop.Value.Value.(int64); !ok { - err = ErrorUnexpectedPropertyType - return - } - vals = append(vals, val) - } - } - return -} - -// GetProperty returns the single string value for a given property `key` -// will return an error if the property is not a single string -func (v Vertex) GetProperty(key string) (val string, err error) { - var vals []string - if vals, err = v.GetMultiProperty(key); err != nil { - return - } - if len(vals) == 0 { - err = ErrorPropertyNotFound - return - } - if len(vals) > 1 { - err = ErrorPropertyIsMulti - return - } - return vals[0], nil -} - -// GetPropertyInt64 returns the single int64 value for a given property `key` -// will return an error if the property is not a single string -func (v Vertex) GetPropertyInt64(key string) (val int64, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "int64"); err != nil { - return - } - if len(valsInterface) == 0 { - err = ErrorPropertyNotFound - return - } - if len(valsInterface) > 1 { - err = ErrorPropertyIsMulti - return - } - return valsInterface[0].(int64), nil -} - -// GetPropertyInt32 returns the single int32 value for a given property `key` -// will return an error if the property is not a single string -func (v Vertex) 
GetPropertyInt32(key string) (val int32, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "int32"); err != nil { - return - } - if len(valsInterface) == 0 { - err = ErrorPropertyNotFound - return - } - if len(valsInterface) > 1 { - err = ErrorPropertyIsMulti - return - } - return valsInterface[0].(int32), nil -} - -// GetPropertyBool returns the single bool value for a given property `key` -// will return an error if the property is not a single string -func (v Vertex) GetPropertyBool(key string) (val bool, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "bool"); err != nil { - return - } - if len(valsInterface) == 0 { - err = ErrorPropertyNotFound - return - } - if len(valsInterface) > 1 { - err = ErrorPropertyIsMulti - return - } - return valsInterface[0].(bool), nil -} - -// GetMetaProperty returns a map[string]string for the given property `key` -func (v Vertex) GetMetaProperty(key string) (metaMap map[string][]string, err error) { - var valInterface []VertexProperty - var ok bool - if valInterface, ok = v.Value.Properties[key]; !ok { - err = ErrorPropertyNotFound - return - } - for _, prop := range valInterface { - subKey := prop.Value.Label - var subVal string - if subVal, ok = prop.Value.Value.(string); !ok { - err = ErrorUnexpectedPropertyType - return - } - if metaMap == nil { - metaMap = make(map[string][]string) - } - metaMap[subKey] = append(metaMap[subKey], subVal) - } - return -} diff --git a/vendor/github.com/gedge/graphson/validation_utils.go b/vendor/github.com/gedge/graphson/validation_utils.go deleted file mode 100644 index 39d190c4..00000000 --- a/vendor/github.com/gedge/graphson/validation_utils.go +++ /dev/null @@ -1,94 +0,0 @@ -package graphson - -import ( - "fmt" -) - -func EdgesMatch(edge1, edge2 Edge) (bool, string) { - if edge1.Type != edge2.Type { - return false, "type" - } - // if ok, reason := 
GenericValuesMatch(edge1.Value.ID, edge2.Value.ID); !ok { - if edge1.Value.ID != edge2.Value.ID { - return false, "id" // + reason - } - if edge1.Value.Label != edge2.Value.Label { - return false, "label" - } - // if ok, reason := GenericValuesMatch(edge1.Value.InV, edge2.Value.InV); !ok { - if edge1.Value.InV != edge2.Value.InV { - return false, "inv" // + reason - } - if edge1.Value.InVLabel != edge2.Value.InVLabel { - return false, "invlabel" - } - // if ok, reason := GenericValuesMatch(edge1.Value.OutV, edge2.Value.OutV); !ok { - if edge1.Value.OutV != edge2.Value.OutV { - return false, "outv" // + reason - } - if edge1.Value.OutVLabel != edge2.Value.OutVLabel { - return false, "outvlabel" - } - if len(edge1.Value.Properties) != len(edge2.Value.Properties) { - return false, "properties" - } - for label, edge1Props := range edge1.Value.Properties { - edge2Props := edge2.Value.Properties[label] - if edge1Props.Type != edge2Props.Type { - return false, "prop.type" - } - if edge1Props.Value.Label != edge2Props.Value.Label || - fmt.Sprintf("%v", edge1Props.Value.Label) != fmt.Sprintf("%v", edge2Props.Value.Label) { - return false, "prop.value" - } - } - return true, "" -} - -func VerticesMatch(vertex1, vertex2 Vertex) bool { - if vertex1.Type != vertex2.Type { - return false - } - if vertex1.Value.ID != vertex2.Value.ID { - return false - } - if vertex1.Value.Label != vertex2.Value.Label { - return false - } - if len(vertex1.Value.Properties) != len(vertex2.Value.Properties) { - return false - } - for label, vertex1Props := range vertex1.Value.Properties { - vertex2Props := vertex2.Value.Properties[label] - if len(vertex1Props) != len(vertex2Props) { - return false - - } - for i, vertex1PropsElement := range vertex1Props { - vertex2PropsElement := vertex2Props[i] - if vertex1PropsElement.Type != vertex2PropsElement.Type { - return false - } - if vertex1PropsElement.Value.ID.Type != vertex2PropsElement.Value.ID.Type || - fmt.Sprintf("%v", 
vertex1PropsElement.Value.ID.Value) != fmt.Sprintf("%v", vertex2PropsElement.Value.ID.Value) { - return false - } - if vertex1PropsElement.Value.Label != vertex2PropsElement.Value.Label { - return false - } - if fmt.Sprintf("%v", vertex1PropsElement.Value.Value) != fmt.Sprintf("%v", vertex2PropsElement.Value.Value) { - return false - } - } - } - return true -} - -func GenericValuesMatch(gv1, gv2 GenericValue) (bool, string) { - if gv1.Type != gv2.Type { - return false, "type" - } - gv1ValueString := fmt.Sprintf("%v", gv1.Value) - gv2ValueString := fmt.Sprintf("%v", gv2.Value) - return gv1ValueString == gv2ValueString, "value" -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/LICENSE b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/LICENSE deleted file mode 100644 index 3e31520d..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 John A. Nadratowski III - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/README.md b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/README.md deleted file mode 100644 index 0bdf10e7..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/README.md +++ /dev/null @@ -1,245 +0,0 @@ -# Golang Neo4J Bolt Driver -[![Build Status](https://travis-ci.org/johnnadratowski/golang-neo4j-bolt-driver.svg?branch=master)](https://travis-ci.org/johnnadratowski/golang-neo4j-bolt-driver) -[![GoDoc](https://godoc.org/github.com/johnnadratowski/golang-neo4j-bolt-driver?status.svg)](https://godoc.org/github.com/johnnadratowski/golang-neo4j-bolt-driver) - - -Implements the Neo4J Bolt Protocol specification: -As of the time of writing this, the current version is v3.1.0-M02 - -``` -go get github.com/johnnadratowski/golang-neo4j-bolt-driver -``` - -## Features - -* Neo4j Bolt low-level binary protocol support -* Message Pipelining for high concurrency -* Connection Pooling -* TLS support -* Compatible with sql.driver - -## Usage - -*_Please see [the statement tests](./stmt_test.go) or [the conn tests](./conn_test.go) for A LOT of examples of usage_* - -### Examples - -#### Quick n’ Dirty - -```go -func quickNDirty() { - driver := bolt.NewDriver() - conn, _ := driver.OpenNeo("bolt://localhost:7687") - defer conn.Close() - - // Start by creating a node - result, _ := conn.ExecNeo("CREATE (n:NODE {foo: {foo}, bar: {bar}})", map[string]interface{}{"foo": 1, "bar": 2.2}) - numResult, _ := result.RowsAffected() - fmt.Printf("CREATED ROWS: %d\n", numResult) // CREATED ROWS: 1 - - // Lets get the node - data, rowsMetadata, _, _ := 
conn.QueryNeoAll("MATCH (n:NODE) RETURN n.foo, n.bar", nil) - fmt.Printf("COLUMNS: %#v\n", rowsMetadata["fields"].([]interface{})) // COLUMNS: n.foo,n.bar - fmt.Printf("FIELDS: %d %f\n", data[0][0].(int64), data[0][1].(float64)) // FIELDS: 1 2.2 - - // oh cool, that worked. lets blast this baby and tell it to run a bunch of statements - // in neo concurrently with a pipeline - results, _ := conn.ExecPipeline([]string{ - "MATCH (n:NODE) CREATE (n)-[:REL]->(f:FOO)", - "MATCH (n:NODE) CREATE (n)-[:REL]->(b:BAR)", - "MATCH (n:NODE) CREATE (n)-[:REL]->(z:BAZ)", - "MATCH (n:NODE) CREATE (n)-[:REL]->(f:FOO)", - "MATCH (n:NODE) CREATE (n)-[:REL]->(b:BAR)", - "MATCH (n:NODE) CREATE (n)-[:REL]->(z:BAZ)", - }, nil, nil, nil, nil, nil, nil) - for _, result := range results { - numResult, _ := result.RowsAffected() - fmt.Printf("CREATED ROWS: %d\n", numResult) // CREATED ROWS: 2 (per each iteration) - } - - data, _, _, _ = conn.QueryNeoAll("MATCH (n:NODE)-[:REL]->(m) RETURN m", nil) - for _, row := range data { - fmt.Printf("NODE: %#v\n", row[0].(graph.Node)) // Prints all nodes - } - - result, _ = conn.ExecNeo(`MATCH (n) DETACH DELETE n`, nil) - numResult, _ = result.RowsAffected() - fmt.Printf("Rows Deleted: %d", numResult) // Rows Deleted: 13 -} -``` - -#### Slow n' Clean - -```go -func slowNClean() { - driver := bolt.NewDriver() - conn, err := driver.OpenNeo("bolt://localhost:7687") - if err != nil { - panic(err) - } - defer conn.Close() - - // Here we prepare a new statement. 
This gives us the flexibility to - // cancel that statement without any request sent to Neo - stmt, err := conn.PrepareNeo("CREATE (n:NODE {foo: {foo}, bar: {bar}})") - if err != nil { - panic(err) - } - - // Executing a statement just returns summary information - result, err := stmt.ExecNeo(map[string]interface{}{"foo": 1, "bar": 2.2}) - if err != nil { - panic(err) - } - numResult, err := result.RowsAffected() - if err != nil { - panic(err) - } - fmt.Printf("CREATED ROWS: %d\n", numResult) // CREATED ROWS: 1 - - // Closing the statment will also close the rows - stmt.Close() - - // Lets get the node. Once again I can cancel this with no penalty - stmt, err = conn.PrepareNeo("MATCH (n:NODE) RETURN n.foo, n.bar") - if err != nil { - panic(err) - } - - // Even once I get the rows, if I do not consume them and close the - // rows, Neo will discard and not send the data - rows, err := stmt.QueryNeo(nil) - if err != nil { - panic(err) - } - - // This interface allows you to consume rows one-by-one, as they - // come off the bolt stream. 
This is more efficient especially - // if you're only looking for a particular row/set of rows, as - // you don't need to load up the entire dataset into memory - data, _, err := rows.NextNeo() - if err != nil { - panic(err) - } - - // This query only returns 1 row, so once it's done, it will return - // the metadata associated with the query completion, along with - // io.EOF as the error - _, _, err = rows.NextNeo() - if err != io.EOF { - panic(err) - } - fmt.Printf("COLUMNS: %#v\n", rows.Metadata()["fields"].([]interface{})) // COLUMNS: n.foo,n.bar - fmt.Printf("FIELDS: %d %f\n", data[0].(int64), data[1].(float64)) // FIELDS: 1 2.2 - - stmt.Close() - - // Here we prepare a new pipeline statement for running multiple - // queries concurrently - pipeline, err := conn.PreparePipeline( - "MATCH (n:NODE) CREATE (n)-[:REL]->(f:FOO)", - "MATCH (n:NODE) CREATE (n)-[:REL]->(b:BAR)", - "MATCH (n:NODE) CREATE (n)-[:REL]->(z:BAZ)", - "MATCH (n:NODE) CREATE (n)-[:REL]->(f:FOO)", - "MATCH (n:NODE) CREATE (n)-[:REL]->(b:BAR)", - "MATCH (n:NODE) CREATE (n)-[:REL]->(z:BAZ)", - ) - if err != nil { - panic(err) - } - - pipelineResults, err := pipeline.ExecPipeline(nil, nil, nil, nil, nil, nil) - if err != nil { - panic(err) - } - - for _, result := range pipelineResults { - numResult, _ := result.RowsAffected() - fmt.Printf("CREATED ROWS: %d\n", numResult) // CREATED ROWS: 2 (per each iteration) - } - - err = pipeline.Close() - if err != nil { - panic(err) - } - - stmt, err = conn.PrepareNeo("MATCH path=(n:NODE)-[:REL]->(m) RETURN path") - if err != nil { - panic(err) - } - - rows, err = stmt.QueryNeo(nil) - if err != nil { - panic(err) - } - - // Here we loop through the rows until we get the metadata object - // back, meaning the row stream has been fully consumed - for err == nil { - var row []interface{} - row, _, err = rows.NextNeo() - if err != nil && err != io.EOF { - panic(err) - } else if err != io.EOF { - fmt.Printf("PATH: %#v\n", row[0].(graph.Path)) // Prints all paths 
- } - } - - stmt.Close() - - result, _ = conn.ExecNeo(`MATCH (n) DETACH DELETE n`, nil) - fmt.Println(result) - numResult, _ = result.RowsAffected() - fmt.Printf("Rows Deleted: %d", numResult) // Rows Deleted: 13 -} -``` -## API - -*_There is much more detailed information in [the godoc](http://godoc.org/github.com/johnnadratowski/golang-neo4j-bolt-driver)_* - -This implementation attempts to follow the best practices as per the Bolt specification, but also implements compatibility with Golang's `sql.driver` interface. - -As such, these interfaces closely match the `sql.driver` interfaces, but they also provide Neo4j Bolt specific functionality in addition to the `sql.driver` interface. - -It is recommended that you use the Neo4j Bolt-specific interfaces if possible. The implementation is more efficient and can more closely support the Neo4j Bolt feature set. - -The URL format is: `bolt://(user):(password)@(host):(port)` -Schema must be `bolt`. User and password is only necessary if you are authenticating. - -Connection pooling is provided out of the box with the `NewDriverPool` method. You can give it the maximum number of -connections to have at a time. - -You can get logs from the driver by setting the log level using the `log` packages `SetLevel`. - - -## Dev Quickstart - -``` -# Put in git hooks -ln -s ../../scripts/pre-commit .git/hooks/pre-commit -ln -s ../../scripts/pre-push .git/hooks/pre-push - -# No special build steps necessary -go build - -# Testing with log info and a local bolt DB, getting coverage output -BOLT_DRIVER_LOG=info NEO4J_BOLT=bolt://localhost:7687 go test -coverprofile=./tmp/cover.out -coverpkg=./... 
-v -race && go tool cover -html=./tmp/cover.out - -# Testing with trace output for debugging -BOLT_DRIVER_LOG=trace NEO4J_BOLT=bolt://localhost:7687 go test -v -race - -# Testing with running recorder to record tests for CI -BOLT_DRIVER_LOG=trace NEO4J_BOLT=bolt://localhost:7687 RECORD_OUTPUT=1 go test -v -race -``` - -The tests are written in an integration testing style. Most of them are in the statement tests, but should be made more granular in the future. - -In order to get CI, I made a recorder mechanism so you don't need to run neo4j alongside the tests in the CI server. You run the tests locally against a neo4j instance with the RECORD_OUTPUT=1 environment variable, it generates the recordings in the ./recordings folder. This is necessary if the tests have changed, or if the internals have significantly changed. Installing the git hooks will run the tests automatically on push. If there are updated tests, you will need to re-run the recorder to add them and push them as well. - -You need access to a running Neo4J database to develop for this project, so that you can run the tests to generate the recordings. 
- -## TODO - -* Cypher Parser to implement NumInput and pre-flight checking -* More Tests -* Benchmark Tests diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/conn.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/conn.go deleted file mode 100644 index d7e55bc3..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/conn.go +++ /dev/null @@ -1,849 +0,0 @@ -package golangNeo4jBoltDriver - -import ( - "bytes" - "database/sql/driver" - "io/ioutil" - "net" - "time" - - "net/url" - "strings" - - "io" - "math" - - "crypto/tls" - "crypto/x509" - "strconv" - - "github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/log" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages" -) - -// Conn represents a connection to Neo4J -// -// Implements a neo-friendly interface. -// Some of the features of this interface implement neo-specific features -// unavailable in the sql/driver compatible interface -// -// Conn objects, and any prepared statements/transactions within ARE NOT -// THREAD SAFE. If you want to use multipe go routines with these objects, -// you should use a driver to create a new conn for each routine. 
-type Conn interface { - // PrepareNeo prepares a neo4j specific statement - PrepareNeo(query string) (Stmt, error) - // PreparePipeline prepares a neo4j specific pipeline statement - // Useful for running multiple queries at the same time - PreparePipeline(query ...string) (PipelineStmt, error) - // QueryNeo queries using the neo4j-specific interface - QueryNeo(query string, params map[string]interface{}) (Rows, error) - // QueryNeoAll queries using the neo4j-specific interface and returns all row data and output metadata - QueryNeoAll(query string, params map[string]interface{}) ([][]interface{}, map[string]interface{}, map[string]interface{}, error) - // QueryPipeline queries using the neo4j-specific interface - // pipelining multiple statements - QueryPipeline(query []string, params ...map[string]interface{}) (PipelineRows, error) - // ExecNeo executes a query using the neo4j-specific interface - ExecNeo(query string, params map[string]interface{}) (Result, error) - // ExecPipeline executes a query using the neo4j-specific interface - // pipelining multiple statements - ExecPipeline(query []string, params ...map[string]interface{}) ([]Result, error) - // Close closes the connection - Close() error - // Begin starts a new transaction - Begin() (driver.Tx, error) - // SetChunkSize is used to set the max chunk size of the - // bytes to send to Neo4j at once - SetChunkSize(uint16) - // SetTimeout sets the read/write timeouts for the - // connection to Neo4j - SetTimeout(time.Duration) -} - -type boltConn struct { - connStr string - url *url.URL - user string - password string - conn net.Conn - connErr error - serverVersion []byte - timeout time.Duration - chunkSize uint16 - closed bool - useTLS bool - certFile string - caCertFile string - keyFile string - tlsNoVerify bool - transaction *boltTx - statement *boltStmt - driver *boltDriver - poolDriver DriverPool -} - -func createBoltConn(connStr string) *boltConn { - return &boltConn{ - connStr: connStr, - timeout: 
time.Second * time.Duration(60), - chunkSize: math.MaxUint16, - serverVersion: make([]byte, 4), - } -} - -// newBoltConn Creates a new bolt connection -func newBoltConn(connStr string, driver *boltDriver) (*boltConn, error) { - - c := createBoltConn(connStr) - c.driver = driver - - err := c.initialize() - if err != nil { - return nil, errors.Wrap(err, "An error occurred initializing connection") - } - - return c, nil -} - -// newPooledBoltConn Creates a new bolt connection with a pooled driver -func newPooledBoltConn(connStr string, driver DriverPool) (*boltConn, error) { - - c := createBoltConn(connStr) - c.poolDriver = driver - - return c, nil -} - -func (c *boltConn) parseURL() (*url.URL, error) { - user := "" - password := "" - url, err := url.Parse(c.connStr) - if err != nil { - return url, errors.Wrap(err, "An error occurred parsing bolt URL") - } else if strings.ToLower(url.Scheme) != "bolt" { - return url, errors.New("Unsupported connection string scheme: %s. Driver only supports 'bolt' scheme.", url.Scheme) - } - - if url.User != nil { - c.user = url.User.Username() - var isSet bool - c.password, isSet = url.User.Password() - if !isSet { - return url, errors.New("Must specify password when passing user") - } - } - - timeout := url.Query().Get("timeout") - if timeout != "" { - timeoutInt, err := strconv.Atoi(timeout) - if err != nil { - return url, errors.New("Invalid format for timeout: %s. 
Must be integer", timeout) - } - - c.timeout = time.Duration(timeoutInt) * time.Second - } - - useTLS := url.Query().Get("tls") - c.useTLS = strings.HasPrefix(strings.ToLower(useTLS), "t") || useTLS == "1" - - if c.useTLS { - c.certFile = url.Query().Get("tls_cert_file") - c.keyFile = url.Query().Get("tls_key_file") - c.caCertFile = url.Query().Get("tls_ca_cert_file") - noVerify := url.Query().Get("tls_no_verify") - c.tlsNoVerify = strings.HasPrefix(strings.ToLower(noVerify), "t") || noVerify == "1" - } - - log.Trace("Bolt Host: ", url.Host) - log.Trace("Timeout: ", c.timeout) - log.Trace("User: ", user) - log.Trace("Password: ", password) - log.Trace("TLS: ", c.useTLS) - log.Trace("TLS No Verify: ", c.tlsNoVerify) - log.Trace("Cert File: ", c.certFile) - log.Trace("Key File: ", c.keyFile) - log.Trace("CA Cert File: ", c.caCertFile) - - return url, nil -} - -func (c *boltConn) createConn() (net.Conn, error) { - - var err error - c.url, err = c.parseURL() - if err != nil { - return nil, errors.Wrap(err, "An error occurred parsing the conn URL") - } - - var conn net.Conn - if c.useTLS { - config, err := c.tlsConfig() - if err != nil { - return nil, errors.Wrap(err, "An error occurred setting up TLS configuration") - } - conn, err = tls.Dial("tcp", c.url.Host, config) - if err != nil { - return nil, errors.Wrap(err, "An error occurred dialing to neo4j") - } - } else { - conn, err = net.DialTimeout("tcp", c.url.Host, c.timeout) - if err != nil { - return nil, errors.Wrap(err, "An error occurred dialing to neo4j") - } - } - - return conn, nil -} - -func (c *boltConn) tlsConfig() (*tls.Config, error) { - config := &tls.Config{ - MinVersion: tls.VersionTLS10, - MaxVersion: tls.VersionTLS12, - } - - if c.caCertFile != "" { - // Load CA cert - usually for self-signed certificates - caCert, err := ioutil.ReadFile(c.caCertFile) - if err != nil { - return nil, err - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - - config.RootCAs = caCertPool - } 
- - if c.certFile != "" { - if c.keyFile == "" { - return nil, errors.New("If you're providing a cert file, you must also provide a key file") - } - - cert, err := tls.LoadX509KeyPair(c.certFile, c.keyFile) - if err != nil { - return nil, err - } - - config.Certificates = []tls.Certificate{cert} - } - - if c.tlsNoVerify { - config.InsecureSkipVerify = true - } - - return config, nil -} - -func (c *boltConn) handShake() error { - - numWritten, err := c.Write(handShake) - if numWritten != 20 { - log.Errorf("Couldn't write expected bytes for magic preamble + supported versions. Written: %d. Expected: 4", numWritten) - if err != nil { - err = errors.Wrap(err, "An error occurred writing magic preamble + supported versions") - } - return err - } - - numRead, err := c.Read(c.serverVersion) - if numRead != 4 { - log.Errorf("Could not read server version response. Read %d bytes. Expected 4 bytes. Output: %s", numRead, c.serverVersion) - if err != nil { - err = errors.Wrap(err, "An error occurred reading server version") - } - return err - } else if bytes.Equal(c.serverVersion, noVersionSupported) { - return errors.New("Server responded with no supported version") - } - - return nil -} - -func (c *boltConn) initialize() error { - - // Handle recorder. If there is no conn string, assume we're playing back a recording. 
- // If there is a recorder and a conn string, assume we're recording the connection - // Else, just create the conn normally - var err error - if c.connStr == "" && c.driver != nil && c.driver.recorder != nil { - c.conn = c.driver.recorder - } else if c.driver != nil && c.driver.recorder != nil { - c.driver.recorder.Conn, err = c.createConn() - if err != nil { - return err - } - c.conn = c.driver.recorder - } else { - c.conn, err = c.createConn() - if err != nil { - return err - } - } - - if err := c.handShake(); err != nil { - if e := c.Close(); e != nil { - log.Errorf("An error occurred closing connection: %s", e) - } - return err - } - - respInt, err := c.sendInit() - if err != nil { - if e := c.Close(); e != nil { - log.Errorf("An error occurred closing connection: %s", e) - } - return err - } - - switch resp := respInt.(type) { - case messages.SuccessMessage: - log.Infof("Successfully initiated Bolt connection: %+v", resp) - return nil - default: - log.Errorf("Got an unrecognized message when initializing connection :%+v", resp) - c.connErr = errors.New("Unrecognized response from the server: %#v", resp) - c.Close() - return driver.ErrBadConn - } -} - -// Read reads the data from the underlying connection -func (c *boltConn) Read(b []byte) (n int, err error) { - if err := c.conn.SetReadDeadline(time.Now().Add(c.timeout)); err != nil { - c.connErr = errors.Wrap(err, "An error occurred setting read deadline") - return 0, driver.ErrBadConn - } - - n, err = c.conn.Read(b) - - if log.GetLevel() >= log.TraceLevel { - log.Tracef("Read %d bytes from stream:\n\n%s\n", n, sprintByteHex(b)) - } - - if err != nil && err != io.EOF { - c.connErr = errors.Wrap(err, "An error occurred reading from stream") - err = driver.ErrBadConn - } - return n, err -} - -// Write writes the data to the underlying connection -func (c *boltConn) Write(b []byte) (n int, err error) { - if err := c.conn.SetWriteDeadline(time.Now().Add(c.timeout)); err != nil { - c.connErr = errors.Wrap(err, 
"An error occurred setting write deadline") - return 0, driver.ErrBadConn - } - - n, err = c.conn.Write(b) - - if log.GetLevel() >= log.TraceLevel { - log.Tracef("Wrote %d of %d bytes to stream:\n\n%s\n", len(b), n, sprintByteHex(b[:n])) - } - - if err != nil { - c.connErr = errors.Wrap(err, "An error occurred writing to stream") - err = driver.ErrBadConn - } - return n, err -} - -// Close closes the connection -// Driver may allow for pooling in the future, keeping connections alive -func (c *boltConn) Close() error { - - if c.closed { - return nil - } - - if c.statement != nil { - if err := c.statement.Close(); err != nil { - return err - } - } - - if c.transaction != nil { - if err := c.transaction.Rollback(); err != nil { - return errors.Wrap(err, "Error rolling back transaction when closing connection") - } - } - - if c.poolDriver != nil { - // If using connection pooling, don't close connection, just reclaim it - err := c.poolDriver.reclaim(c) - if err != nil { - log.Errorf("An error occurred reclaiming connection for pool: %s", err) - c.connErr = errors.Wrap(err, "An error occurred closing the connection") - return driver.ErrBadConn - } - return nil - } - - err := c.conn.Close() - c.closed = true - if err != nil { - c.connErr = errors.Wrap(err, "An error occurred closing the connection") - return driver.ErrBadConn - } - - return nil -} - -func (c *boltConn) ackFailure(failure messages.FailureMessage) error { - log.Infof("Acknowledging Failure: %#v", failure) - - ack := messages.NewAckFailureMessage() - err := encoding.NewEncoder(c, c.chunkSize).Encode(ack) - if err != nil { - return errors.Wrap(err, "An error occurred encoding ack failure message") - } - - for { - respInt, err := encoding.NewDecoder(c).Decode() - if err != nil { - return errors.Wrap(err, "An error occurred decoding ack failure message response") - } - - switch resp := respInt.(type) { - case messages.IgnoredMessage: - log.Infof("Got ignored message when acking failure: %#v", resp) - continue 
- case messages.SuccessMessage: - log.Infof("Got success message when acking failure: %#v", resp) - return nil - case messages.FailureMessage: - log.Errorf("Got failure message when acking failure: %#v", resp) - return c.reset() - default: - log.Errorf("Got unrecognized response from acking failure: %#v", resp) - c.connErr = errors.New("Got unrecognized response from acking failure: %#v. CLOSING SESSION!", resp) - c.Close() - return driver.ErrBadConn - } - } -} - -func (c *boltConn) reset() error { - log.Info("Resetting session") - - reset := messages.NewResetMessage() - err := encoding.NewEncoder(c, c.chunkSize).Encode(reset) - if err != nil { - return errors.Wrap(err, "An error occurred encoding reset message") - } - - for { - respInt, err := encoding.NewDecoder(c).Decode() - if err != nil { - return errors.Wrap(err, "An error occurred decoding reset message response") - } - - switch resp := respInt.(type) { - case messages.IgnoredMessage: - log.Infof("Got ignored message when resetting session: %#v", resp) - continue - case messages.SuccessMessage: - log.Infof("Got success message when resetting session: %#v", resp) - return nil - case messages.FailureMessage: - log.Errorf("Got failure message when resetting session: %#v", resp) - err = c.Close() - if err != nil { - log.Errorf("An error occurred closing the session: %s", err) - } - return errors.Wrap(resp, "Error resetting session. CLOSING SESSION!") - default: - log.Errorf("Got unrecognized response from resetting session: %#v", resp) - c.connErr = errors.New("Got unrecognized response from resetting session: %#v. CLOSING SESSION!", resp) - c.Close() - return driver.ErrBadConn - } - } -} - -// Prepare prepares a new statement for a query -func (c *boltConn) Prepare(query string) (driver.Stmt, error) { - return c.prepare(query) -} - -// Prepare prepares a new statement for a query. Implements a Neo-friendly alternative to sql/driver. 
-func (c *boltConn) PrepareNeo(query string) (Stmt, error) { - return c.prepare(query) -} - -// PreparePipeline prepares a new pipeline statement for a query. -func (c *boltConn) PreparePipeline(queries ...string) (PipelineStmt, error) { - if c.statement != nil { - return nil, errors.New("An open statement already exists") - } - if c.closed { - return nil, errors.New("Connection already closed") - } - c.statement = newPipelineStmt(queries, c) - return c.statement, nil -} - -func (c *boltConn) prepare(query string) (*boltStmt, error) { - if c.statement != nil { - return nil, errors.New("An open statement already exists") - } - if c.closed { - return nil, errors.New("Connection already closed") - } - c.statement = newStmt(query, c) - return c.statement, nil -} - -// Begin begins a new transaction with the Neo4J Database -func (c *boltConn) Begin() (driver.Tx, error) { - if c.transaction != nil { - return nil, errors.New("An open transaction already exists") - } - if c.statement != nil { - return nil, errors.New("Cannot open a transaction when you already have an open statement") - } - if c.closed { - return nil, errors.New("Connection already closed") - } - - successInt, pullInt, err := c.sendRunPullAllConsumeSingle("BEGIN", nil) - if err != nil { - return nil, errors.Wrap(err, "An error occurred beginning transaction") - } - - success, ok := successInt.(messages.SuccessMessage) - if !ok { - return nil, errors.New("Unrecognized response type beginning transaction: %#v", success) - } - - log.Infof("Got success message beginning transaction: %#v", success) - - success, ok = pullInt.(messages.SuccessMessage) - if !ok { - return nil, errors.New("Unrecognized response type pulling transaction: %#v", success) - } - - log.Infof("Got success message pulling transaction: %#v", success) - - return newTx(c), nil -} - -// Sets the size of the chunks to write to the stream -func (c *boltConn) SetChunkSize(chunkSize uint16) { - c.chunkSize = chunkSize -} - -// Sets the timeout for 
reading and writing to the stream -func (c *boltConn) SetTimeout(timeout time.Duration) { - c.timeout = timeout -} - -func (c *boltConn) consume() (interface{}, error) { - log.Info("Consuming response from bolt stream") - - respInt, err := encoding.NewDecoder(c).Decode() - if err != nil { - return respInt, err - } - - if log.GetLevel() >= log.TraceLevel { - log.Tracef("Consumed Response: %#v", respInt) - } - - if failure, isFail := respInt.(messages.FailureMessage); isFail { - log.Errorf("Got failure message: %#v", failure) - err := c.ackFailure(failure) - if err != nil { - return nil, err - } - return failure, errors.Wrap(failure, "Neo4J reported a failure for the query") - } - - return respInt, err -} - -func (c *boltConn) consumeAll() ([]interface{}, interface{}, error) { - log.Info("Consuming all responses until success/failure") - - responses := []interface{}{} - for { - respInt, err := c.consume() - if err != nil { - return nil, respInt, err - } - - if success, isSuccess := respInt.(messages.SuccessMessage); isSuccess { - log.Infof("Got success message: %#v", success) - return responses, success, nil - } - - responses = append(responses, respInt) - } -} - -func (c *boltConn) consumeAllMultiple(mult int) ([][]interface{}, []interface{}, error) { - log.Info("Consuming all responses %d times until success/failure", mult) - - responses := make([][]interface{}, mult) - successes := make([]interface{}, mult) - for i := 0; i < mult; i++ { - - resp, success, err := c.consumeAll() - if err != nil { - return responses, successes, err - } - - responses[i] = resp - successes[i] = success - } - - return responses, successes, nil -} - -func (c *boltConn) sendInit() (interface{}, error) { - log.Infof("Sending INIT Message. 
ClientID: %s User: %s Password: %s", ClientID, c.user, c.password) - - initMessage := messages.NewInitMessage(ClientID, c.user, c.password) - if err := encoding.NewEncoder(c, c.chunkSize).Encode(initMessage); err != nil { - return nil, errors.Wrap(err, "An error occurred sending init message") - } - - return c.consume() -} - -func (c *boltConn) sendRun(query string, args map[string]interface{}) error { - log.Infof("Sending RUN message: query %s (args: %#v)", query, args) - runMessage := messages.NewRunMessage(query, args) - if err := encoding.NewEncoder(c, c.chunkSize).Encode(runMessage); err != nil { - return errors.Wrap(err, "An error occurred running query") - } - - return nil -} - -func (c *boltConn) sendRunConsume(query string, args map[string]interface{}) (interface{}, error) { - if err := c.sendRun(query, args); err != nil { - return nil, err - } - - return c.consume() -} - -func (c *boltConn) sendPullAll() error { - log.Infof("Sending PULL_ALL message") - - pullAllMessage := messages.NewPullAllMessage() - err := encoding.NewEncoder(c, c.chunkSize).Encode(pullAllMessage) - if err != nil { - return errors.Wrap(err, "An error occurred encoding pull all query") - } - - return nil -} - -func (c *boltConn) sendPullAllConsume() (interface{}, error) { - if err := c.sendPullAll(); err != nil { - return nil, err - } - - return c.consume() -} - -func (c *boltConn) sendRunPullAll(query string, args map[string]interface{}) error { - err := c.sendRun(query, args) - if err != nil { - return err - } - - return c.sendPullAll() -} - -func (c *boltConn) sendRunPullAllConsumeRun(query string, args map[string]interface{}) (interface{}, error) { - err := c.sendRunPullAll(query, args) - if err != nil { - return nil, err - } - - return c.consume() -} - -func (c *boltConn) sendRunPullAllConsumeSingle(query string, args map[string]interface{}) (interface{}, interface{}, error) { - err := c.sendRunPullAll(query, args) - if err != nil { - return nil, nil, err - } - - runSuccess, err 
:= c.consume() - if err != nil { - return runSuccess, nil, err - } - - pullSuccess, err := c.consume() - return runSuccess, pullSuccess, err -} - -func (c *boltConn) sendRunPullAllConsumeAll(query string, args map[string]interface{}) (interface{}, interface{}, []interface{}, error) { - err := c.sendRunPullAll(query, args) - if err != nil { - return nil, nil, nil, err - } - - runSuccess, err := c.consume() - if err != nil { - return runSuccess, nil, nil, err - } - - records, pullSuccess, err := c.consumeAll() - return runSuccess, pullSuccess, records, err -} - -func (c *boltConn) sendDiscardAll() error { - log.Infof("Sending DISCARD_ALL message") - - discardAllMessage := messages.NewDiscardAllMessage() - err := encoding.NewEncoder(c, c.chunkSize).Encode(discardAllMessage) - if err != nil { - return errors.Wrap(err, "An error occurred encoding discard all query") - } - - return nil -} - -func (c *boltConn) sendDiscardAllConsume() (interface{}, error) { - if err := c.sendDiscardAll(); err != nil { - return nil, err - } - - return c.consume() -} - -func (c *boltConn) sendRunDiscardAll(query string, args map[string]interface{}) error { - err := c.sendRun(query, args) - if err != nil { - return err - } - - return c.sendDiscardAll() -} - -func (c *boltConn) sendRunDiscardAllConsume(query string, args map[string]interface{}) (interface{}, interface{}, error) { - runResp, err := c.sendRunConsume(query, args) - if err != nil { - return runResp, nil, err - } - - discardResp, err := c.sendDiscardAllConsume() - return runResp, discardResp, err -} - -func (c *boltConn) Query(query string, args []driver.Value) (driver.Rows, error) { - params, err := driverArgsToMap(args) - if err != nil { - return nil, err - } - return c.queryNeo(query, params) -} - -func (c *boltConn) QueryNeo(query string, params map[string]interface{}) (Rows, error) { - return c.queryNeo(query, params) -} - -func (c *boltConn) QueryNeoAll(query string, params map[string]interface{}) ([][]interface{}, 
map[string]interface{}, map[string]interface{}, error) { - rows, err := c.queryNeo(query, params) - if err != nil { - return nil, nil, nil, err - } - defer rows.Close() - - data, metadata, err := rows.All() - return data, rows.metadata, metadata, err -} - -func (c *boltConn) queryNeo(query string, params map[string]interface{}) (*boltRows, error) { - if c.statement != nil { - return nil, errors.New("An open statement already exists") - } - if c.closed { - return nil, errors.New("Connection already closed") - } - - c.statement = newStmt(query, c) - - // Pipeline the run + pull all for this - successResp, err := c.sendRunPullAllConsumeRun(c.statement.query, params) - if err != nil { - return nil, err - } - success, ok := successResp.(messages.SuccessMessage) - if !ok { - return nil, errors.New("Unexpected response querying neo from connection: %#v", successResp) - } - - c.statement.rows = newQueryRows(c.statement, success.Metadata) - return c.statement.rows, nil -} - -func (c *boltConn) QueryPipeline(queries []string, params ...map[string]interface{}) (PipelineRows, error) { - if c.statement != nil { - return nil, errors.New("An open statement already exists") - } - if c.closed { - return nil, errors.New("Connection already closed") - } - - c.statement = newPipelineStmt(queries, c) - rows, err := c.statement.QueryPipeline(params...) - if err != nil { - return nil, err - } - - // Since we're not exposing the statement, - // tell the rows to close it when they are closed - rows.(*boltRows).closeStatement = true - return rows, nil -} - -// Exec executes a query that returns no rows. See sql/driver.Stmt. 
-// You must bolt encode a map to pass as []bytes for the driver value -func (c *boltConn) Exec(query string, args []driver.Value) (driver.Result, error) { - if c.statement != nil { - return nil, errors.New("An open statement already exists") - } - if c.closed { - return nil, errors.New("Connection already closed") - } - - stmt := newStmt(query, c) - defer stmt.Close() - - return stmt.Exec(args) -} - -// ExecNeo executes a query that returns no rows. Implements a Neo-friendly alternative to sql/driver. -func (c *boltConn) ExecNeo(query string, params map[string]interface{}) (Result, error) { - if c.statement != nil { - return nil, errors.New("An open statement already exists") - } - if c.closed { - return nil, errors.New("Connection already closed") - } - - stmt := newStmt(query, c) - defer stmt.Close() - - return stmt.ExecNeo(params) -} - -func (c *boltConn) ExecPipeline(queries []string, params ...map[string]interface{}) ([]Result, error) { - if c.statement != nil { - return nil, errors.New("An open statement already exists") - } - if c.closed { - return nil, errors.New("Connection already closed") - } - - stmt := newPipelineStmt(queries, c) - defer stmt.Close() - - return stmt.ExecPipeline(params...) -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/doc.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/doc.go deleted file mode 100644 index 8db5d084..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/doc.go +++ /dev/null @@ -1,95 +0,0 @@ -/*Package golangNeo4jBoltDriver implements a driver for the Neo4J Bolt Protocol. - -The driver is compatible with Golang's sql.driver interface, but -aims to implement a more complete featureset in line with what -Neo4J and Bolt provides. - -As such, there are multiple interfaces the user can choose from. 
-It's highly recommended that the user use the Neo4J-specific -interfaces as they are more flexible and efficient than the -provided sql.driver compatible methods. - -The interface tries to be consistent throughout. The sql.driver -interfaces are standard, but the Neo4J-specific ones contain a -naming convention of either "Neo" or "Pipeline". - -The "Neo" ones are the basic interfaces for making queries to -Neo4j and it's expected that these would be used the most. - -The "Pipeline" ones are to support Bolt's pipelining features. -Pipelines allow the user to send Neo4j many queries at once and -have them executed by the database concurrently. This is useful -if you have a bunch of queries that aren't necessarily dependant -on one another, and you want to get better performance. The -internal APIs will also pipeline statements where it is able to -reliably do so, but by manually using the pipelining feature -you can maximize your throughput. - -The API provides connection pooling using the `NewDriverPool` method. -This allows you to pass it the maximum number of open connections -to be used in the pool. Once this limit is hit, any new clients will -have to wait for a connection to become available again. - -The sql driver is registered as "neo4j-bolt". The sql.driver interface -is much more limited than what bolt and neo4j supports. In some cases, -concessions were made in order to make that interface work with the -neo4j way of doing things. The main instance of this is the marshalling -of objects to/from the sql.driver.Value interface. In order to support -object types that aren't supported by this interface, the internal encoding -package is used to marshal these objects to byte strings. This ultimately -makes for a less efficient and more 'clunky' implementation. A glaring -instance of this is passing parameters. Neo4j expects named parameters -but the driver interface can only really support positional parameters. 
-To get around this, the user must create a map[string]interface{} of their -parameters and marshal it to a driver.Value using the encoding.Marshal -function. Similarly, the user must unmarshal data returned from the queries -using the encoding.Unmarshal function, then use type assertions to retrieve -the proper type. - -In most cases the driver will return the data from neo as the proper -go-specific types. For integers they always come back -as int64 and floats always come back as float64. This is for the -convenience of the user and acts similarly to go's JSON interface. -This prevents the user from having to use reflection to get -these values. Internally, the types are always transmitted over -the wire with as few bytes as possible. - -There are also cases where no go-specific type matches the returned values, -such as when you query for a node, relationship, or path. The driver -exposes specific structs which represent this data in the 'structures.graph' -package. There are 4 types - Node, Relationship, UnboundRelationship, and -Path. The driver returns interface{} objects which must have their types -properly asserted to get the data out. - -There are some limitations to the types of collections the driver -supports. Specifically, maps should always be of type map[string]interface{} -and lists should always be of type []interface{}. It doesn't seem that -the Bolt protocol supports uint64 either, so the biggest number it can send -right now is the int64 max. - -The URL format is: `bolt://(user):(password)@(host):(port)` -Schema must be `bolt`. User and password is only necessary if you are authenticating. -TLS is supported by using query parameters on the connection string, like so: -`bolt://host:port?tls=true&tls_no_verify=false` - -The supported query params are: - -* timeout - the number of seconds to set the connection timeout to. Defaults to 60 seconds. 
-* tls - Set to 'true' or '1' if you want to use TLS encryption -* tls_no_verify - Set to 'true' or '1' if you want to accept any server certificate (for testing, not secure) -* tls_ca_cert_file - path to a custom ca cert for a self-signed TLS cert -* tls_cert_file - path to a cert file for this client (need to verify this is processed by Neo4j) -* tls_key_file - path to a key file for this client (need to verify this is processed by Neo4j) - -Errors returned from the API support wrapping, so if you receive an error -from the library, it might be wrapping other errors. You can get the innermost -error by using the `InnerMost` method. Failure messages from Neo4J are reported, -along with their metadata, as an error. In order to get the failure message metadata -from a wrapped error, you can do so by calling -`err.(*errors.Error).InnerMost().(messages.FailureMessage).Metadata` - -If there is an error with the database connection, you should get a sql/driver ErrBadConn -as per the best practice recommendations of the Golang SQL Driver. However, this error -may be wrapped, so you might have to call `InnerMost` to get it, as specified above. -*/ -package golangNeo4jBoltDriver diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/driver.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/driver.go deleted file mode 100644 index 9703b9bf..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/driver.go +++ /dev/null @@ -1,186 +0,0 @@ -package golangNeo4jBoltDriver - -import ( - "database/sql" - "database/sql/driver" - "sync" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" -) - -var ( - magicPreamble = []byte{0x60, 0x60, 0xb0, 0x17} - supportedVersions = []byte{ - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - } - handShake = append(magicPreamble, supportedVersions...) 
- noVersionSupported = []byte{0x00, 0x00, 0x00, 0x00} - // Version is the current version of this driver - Version = "1.0" - // ClientID is the id of this client - ClientID = "GolangNeo4jBolt/" + Version -) - -// Driver is a driver allowing connection to Neo4j -// The driver allows you to open a new connection to Neo4j -// -// Implements sql/driver, but also includes its own more neo-friendly interface. -// Some of the features of this interface implement neo-specific features -// unavailable in the sql/driver compatible interface -// -// Driver objects should be THREAD SAFE, so you can use them -// to open connections in multiple threads. The connection objects -// themselves, and any prepared statements/transactions within ARE NOT -// THREAD SAFE. -type Driver interface { - // Open opens a sql.driver compatible connection. Used internally - // by the go sql interface - Open(string) (driver.Conn, error) - // OpenNeo opens a Neo-specific connection. This should be used - // directly when not using the golang sql interface - OpenNeo(string) (Conn, error) -} - -type boltDriver struct { - recorder *recorder -} - -// NewDriver creates a new Driver object -func NewDriver() Driver { - return &boltDriver{} -} - -// Open opens a new Bolt connection to the Neo4J database -func (d *boltDriver) Open(connStr string) (driver.Conn, error) { - return newBoltConn(connStr, d) // Never use pooling when using SQL driver -} - -// Open opens a new Bolt connection to the Neo4J database. Implements a Neo-friendly alternative to sql/driver. -func (d *boltDriver) OpenNeo(connStr string) (Conn, error) { - return newBoltConn(connStr, d) -} - -// DriverPool is a driver allowing connection to Neo4j with support for connection pooling -// The driver allows you to open a new connection to Neo4j -// -// Driver objects should be THREAD SAFE, so you can use them -// to open connections in multiple threads. 
The connection objects -// themselves, and any prepared statements/transactions within ARE NOT -// THREAD SAFE. -type DriverPool interface { - // OpenPool opens a Neo-specific connection. - OpenPool() (Conn, error) - reclaim(*boltConn) error -} - -// ClosableDriverPool like the DriverPool but with a closable function -type ClosableDriverPool interface { - DriverPool - Close() error -} - -type boltDriverPool struct { - connStr string - maxConns int - pool chan *boltConn - connRefs []*boltConn - refLock sync.Mutex - closed bool -} - -// NewDriverPool creates a new Driver object with connection pooling -func NewDriverPool(connStr string, max int) (DriverPool, error) { - return createDriverPool(connStr, max) -} - -// NewClosableDriverPool create a closable driver pool -func NewClosableDriverPool(connStr string, max int) (ClosableDriverPool, error) { - return createDriverPool(connStr, max) -} - -func createDriverPool(connStr string, max int) (*boltDriverPool, error) { - d := &boltDriverPool{ - connStr: connStr, - maxConns: max, - pool: make(chan *boltConn, max), - } - - for i := 0; i < max; i++ { - conn, err := newPooledBoltConn(connStr, d) - if err != nil { - return nil, err - } - - d.pool <- conn - } - - return d, nil -} - -// OpenNeo opens a new Bolt connection to the Neo4J database. -func (d *boltDriverPool) OpenPool() (Conn, error) { - // For each connection request we need to block in case the Close function is called. This gives us a guarantee - // when closing the pool no new connections are made. 
- d.refLock.Lock() - defer d.refLock.Unlock() - if !d.closed { - conn := <-d.pool - if conn.conn == nil { - if err := conn.initialize(); err != nil { - // Return the connection back into the pool - d.pool <- conn - return nil, err - } - d.connRefs = append(d.connRefs, conn) - } - return conn, nil - } else { - return nil, errors.New("Driver pool has been closed") - } -} - -// Close all connections in the pool -func (d *boltDriverPool) Close() error { - // Lock the connection ref so no new connections can be added - d.refLock.Lock() - defer d.refLock.Unlock() - for _, conn := range d.connRefs { - // Remove the reference to the pool, to allow a clean up of the connection - conn.poolDriver = nil - err := conn.Close() - if err != nil { - d.closed = true - return err - } - } - // Mark the pool as closed to stop any new connections - d.closed = true - return nil -} - -func (d *boltDriverPool) reclaim(conn *boltConn) error { - var newConn *boltConn - var err error - if conn.connErr != nil || conn.closed { - newConn, err = newPooledBoltConn(d.connStr, d) - if err != nil { - return err - } - } else { - // sneakily swap out connection so a reference to - // it isn't held on to - newConn = &boltConn{} - *newConn = *conn - } - - d.pool <- newConn - conn = nil - - return nil -} - -func init() { - sql.Register("neo4j-bolt", &boltDriver{}) -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/decoder.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/decoder.go deleted file mode 100644 index 7d88f549..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/decoder.go +++ /dev/null @@ -1,529 +0,0 @@ -package encoding - -import ( - "bytes" - "encoding/binary" - "io" - - "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph" - 
"github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages" -) - -// Decoder decodes a message from the bolt protocol stream -// Attempts to support all builtin golang types, when it can be confidently -// mapped to a data type from: http://alpha.neohq.net/docs/server-manual/bolt-serialization.html#bolt-packstream-structures -// (version v3.1.0-M02 at the time of writing this. -// -// Maps and Slices are a special case, where only -// map[string]interface{} and []interface{} are supported. -// The interface for maps and slices may be more permissive in the future. -type Decoder struct { - r io.Reader - buf *bytes.Buffer -} - -// NewDecoder Creates a new Decoder object -func NewDecoder(r io.Reader) Decoder { - return Decoder{ - r: r, - buf: &bytes.Buffer{}, - } -} - -// Unmarshal is used to marshal an object to the bolt interface encoded bytes -func Unmarshal(b []byte) (interface{}, error) { - return NewDecoder(bytes.NewBuffer(b)).Decode() -} - -// Read out the object bytes to decode -func (d Decoder) read() (*bytes.Buffer, error) { - output := &bytes.Buffer{} - for { - lengthBytes := make([]byte, 2) - if numRead, err := d.r.Read(lengthBytes); numRead != 2 { - return nil, errors.Wrap(err, "Couldn't read expected bytes for message length. Read: %d Expected: 2.", numRead) - } - - // Chunk header contains length of current message - messageLen := binary.BigEndian.Uint16(lengthBytes) - if messageLen == 0 { - // If the length is 0, the chunk is done. - return output, nil - } - - data, err := d.readData(messageLen) - if err != nil { - return output, errors.Wrap(err, "An error occurred reading message data") - } - - numWritten, err := output.Write(data) - if numWritten < len(data) { - return output, errors.New("Didn't write full data on output. 
Expected: %d Wrote: %d", len(data), numWritten) - } else if err != nil { - return output, errors.Wrap(err, "Error writing data to output") - } - } -} - -func (d Decoder) readData(messageLen uint16) ([]byte, error) { - output := make([]byte, messageLen) - var totalRead uint16 - for totalRead < messageLen { - data := make([]byte, messageLen-totalRead) - numRead, err := d.r.Read(data) - if err != nil { - return nil, errors.Wrap(err, "An error occurred reading from stream") - } else if numRead == 0 { - return nil, errors.Wrap(err, "Couldn't read expected bytes for message. Read: %d Expected: %d.", totalRead, messageLen) - } - - for idx, b := range data { - output[uint16(idx)+totalRead] = b - } - - totalRead += uint16(numRead) - } - - return output, nil -} - -// Decode decodes the stream to an object -func (d Decoder) Decode() (interface{}, error) { - data, err := d.read() - if err != nil { - return nil, err - } - - return d.decode(data) -} - -func (d Decoder) decode(buffer *bytes.Buffer) (interface{}, error) { - - marker, err := buffer.ReadByte() - if err != nil { - return nil, errors.Wrap(err, "Error reading marker") - } - - // Here we have to get the marker as an int to check and see - // if it's a TINYINT - var markerInt int8 - err = binary.Read(bytes.NewBuffer([]byte{marker}), binary.BigEndian, &markerInt) - if err != nil { - return nil, errors.Wrap(err, "Error reading marker as int8 from bolt message") - } - - switch { - - // NIL - case marker == NilMarker: - return nil, nil - - // BOOL - case marker == TrueMarker: - return true, nil - case marker == FalseMarker: - return false, nil - - // INT - case markerInt >= -16 && markerInt <= 127: - return int64(int8(marker)), nil - case marker == Int8Marker: - var out int8 - err := binary.Read(buffer, binary.BigEndian, &out) - return int64(out), err - case marker == Int16Marker: - var out int16 - err := binary.Read(buffer, binary.BigEndian, &out) - return int64(out), err - case marker == Int32Marker: - var out int32 - err 
:= binary.Read(buffer, binary.BigEndian, &out) - return int64(out), err - case marker == Int64Marker: - var out int64 - err := binary.Read(buffer, binary.BigEndian, &out) - return int64(out), err - - // FLOAT - case marker == FloatMarker: - var out float64 - err := binary.Read(buffer, binary.BigEndian, &out) - return out, err - - // STRING - case marker >= TinyStringMarker && marker <= TinyStringMarker+0x0F: - size := int(marker) - int(TinyStringMarker) - if size == 0 { - return "", nil - } - return string(buffer.Next(size)), nil - case marker == String8Marker: - var size int8 - if err := binary.Read(buffer, binary.BigEndian, &size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading string size") - } - return string(buffer.Next(int(size))), nil - case marker == String16Marker: - var size int16 - if err := binary.Read(buffer, binary.BigEndian, &size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading string size") - } - return string(buffer.Next(int(size))), nil - case marker == String32Marker: - var size int32 - if err := binary.Read(buffer, binary.BigEndian, &size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading string size") - } - return string(buffer.Next(int(size))), nil - - // SLICE - case marker >= TinySliceMarker && marker <= TinySliceMarker+0x0F: - size := int(marker) - int(TinySliceMarker) - return d.decodeSlice(buffer, size) - case marker == Slice8Marker: - var size int8 - if err := binary.Read(buffer, binary.BigEndian, &size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading slice size") - } - return d.decodeSlice(buffer, int(size)) - case marker == Slice16Marker: - var size int16 - if err := binary.Read(buffer, binary.BigEndian, &size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading slice size") - } - return d.decodeSlice(buffer, int(size)) - case marker == Slice32Marker: - var size int32 - if err := binary.Read(buffer, binary.BigEndian, 
&size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading slice size") - } - return d.decodeSlice(buffer, int(size)) - - // MAP - case marker >= TinyMapMarker && marker <= TinyMapMarker+0x0F: - size := int(marker) - int(TinyMapMarker) - return d.decodeMap(buffer, size) - case marker == Map8Marker: - var size int8 - if err := binary.Read(buffer, binary.BigEndian, &size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading map size") - } - return d.decodeMap(buffer, int(size)) - case marker == Map16Marker: - var size int16 - if err := binary.Read(buffer, binary.BigEndian, &size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading map size") - } - return d.decodeMap(buffer, int(size)) - case marker == Map32Marker: - var size int32 - if err := binary.Read(buffer, binary.BigEndian, &size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading map size") - } - return d.decodeMap(buffer, int(size)) - - // STRUCTURES - case marker >= TinyStructMarker && marker <= TinyStructMarker+0x0F: - size := int(marker) - int(TinyStructMarker) - return d.decodeStruct(buffer, size) - case marker == Struct8Marker: - var size int8 - if err := binary.Read(buffer, binary.BigEndian, &size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading struct size") - } - return d.decodeStruct(buffer, int(size)) - case marker == Struct16Marker: - var size int16 - if err := binary.Read(buffer, binary.BigEndian, &size); err != nil { - return nil, errors.Wrap(err, "An error occurred reading struct size") - } - return d.decodeStruct(buffer, int(size)) - - default: - return nil, errors.New("Unrecognized marker byte!: %x", marker) - } - -} - -func (d Decoder) decodeSlice(buffer *bytes.Buffer, size int) ([]interface{}, error) { - slice := make([]interface{}, size) - for i := 0; i < size; i++ { - item, err := d.decode(buffer) - if err != nil { - return nil, err - } - slice[i] = item - } - - return slice, nil -} - 
-func (d Decoder) decodeMap(buffer *bytes.Buffer, size int) (map[string]interface{}, error) { - mapp := make(map[string]interface{}, size) - for i := 0; i < size; i++ { - keyInt, err := d.decode(buffer) - if err != nil { - return nil, err - } - val, err := d.decode(buffer) - if err != nil { - return nil, err - } - - key, ok := keyInt.(string) - if !ok { - return nil, errors.New("Unexpected key type: %T with value %+v", keyInt, keyInt) - } - mapp[key] = val - } - - return mapp, nil -} - -func (d Decoder) decodeStruct(buffer *bytes.Buffer, size int) (interface{}, error) { - - signature, err := buffer.ReadByte() - if err != nil { - return nil, errors.Wrap(err, "An error occurred reading struct signature byte") - } - - switch signature { - case graph.NodeSignature: - return d.decodeNode(buffer) - case graph.RelationshipSignature: - return d.decodeRelationship(buffer) - case graph.PathSignature: - return d.decodePath(buffer) - case graph.UnboundRelationshipSignature: - return d.decodeUnboundRelationship(buffer) - case messages.RecordMessageSignature: - return d.decodeRecordMessage(buffer) - case messages.FailureMessageSignature: - return d.decodeFailureMessage(buffer) - case messages.IgnoredMessageSignature: - return d.decodeIgnoredMessage(buffer) - case messages.SuccessMessageSignature: - return d.decodeSuccessMessage(buffer) - case messages.AckFailureMessageSignature: - return d.decodeAckFailureMessage(buffer) - case messages.DiscardAllMessageSignature: - return d.decodeDiscardAllMessage(buffer) - case messages.PullAllMessageSignature: - return d.decodePullAllMessage(buffer) - case messages.ResetMessageSignature: - return d.decodeResetMessage(buffer) - default: - return nil, errors.New("Unrecognized type decoding struct with signature %x", signature) - } -} - -func (d Decoder) decodeNode(buffer *bytes.Buffer) (graph.Node, error) { - node := graph.Node{} - - nodeIdentityInt, err := d.decode(buffer) - if err != nil { - return node, err - } - node.NodeIdentity = 
nodeIdentityInt.(int64) - - labelInt, err := d.decode(buffer) - if err != nil { - return node, err - } - labelIntSlice, ok := labelInt.([]interface{}) - if !ok { - return node, errors.New("Expected: Labels []string, but got %T %+v", labelInt, labelInt) - } - node.Labels, err = sliceInterfaceToString(labelIntSlice) - if err != nil { - return node, err - } - - propertiesInt, err := d.decode(buffer) - if err != nil { - return node, err - } - node.Properties, ok = propertiesInt.(map[string]interface{}) - if !ok { - return node, errors.New("Expected: Properties map[string]interface{}, but got %T %+v", propertiesInt, propertiesInt) - } - - return node, nil - -} - -func (d Decoder) decodeRelationship(buffer *bytes.Buffer) (graph.Relationship, error) { - rel := graph.Relationship{} - - relIdentityInt, err := d.decode(buffer) - if err != nil { - return rel, err - } - rel.RelIdentity = relIdentityInt.(int64) - - startNodeIdentityInt, err := d.decode(buffer) - if err != nil { - return rel, err - } - rel.StartNodeIdentity = startNodeIdentityInt.(int64) - - endNodeIdentityInt, err := d.decode(buffer) - if err != nil { - return rel, err - } - rel.EndNodeIdentity = endNodeIdentityInt.(int64) - - var ok bool - typeInt, err := d.decode(buffer) - if err != nil { - return rel, err - } - rel.Type, ok = typeInt.(string) - if !ok { - return rel, errors.New("Expected: Type string, but got %T %+v", typeInt, typeInt) - } - - propertiesInt, err := d.decode(buffer) - if err != nil { - return rel, err - } - rel.Properties, ok = propertiesInt.(map[string]interface{}) - if !ok { - return rel, errors.New("Expected: Properties map[string]interface{}, but got %T %+v", propertiesInt, propertiesInt) - } - - return rel, nil -} - -func (d Decoder) decodePath(buffer *bytes.Buffer) (graph.Path, error) { - path := graph.Path{} - - nodesInt, err := d.decode(buffer) - if err != nil { - return path, err - } - nodesIntSlice, ok := nodesInt.([]interface{}) - if !ok { - return path, errors.New("Expected: Nodes 
[]Node, but got %T %+v", nodesInt, nodesInt) - } - path.Nodes, err = sliceInterfaceToNode(nodesIntSlice) - if err != nil { - return path, err - } - - relsInt, err := d.decode(buffer) - if err != nil { - return path, err - } - relsIntSlice, ok := relsInt.([]interface{}) - if !ok { - return path, errors.New("Expected: Relationships []Relationship, but got %T %+v", relsInt, relsInt) - } - path.Relationships, err = sliceInterfaceToUnboundRelationship(relsIntSlice) - if err != nil { - return path, err - } - - seqInt, err := d.decode(buffer) - if err != nil { - return path, err - } - seqIntSlice, ok := seqInt.([]interface{}) - if !ok { - return path, errors.New("Expected: Sequence []int, but got %T %+v", seqInt, seqInt) - } - path.Sequence, err = sliceInterfaceToInt(seqIntSlice) - - return path, err -} - -func (d Decoder) decodeUnboundRelationship(buffer *bytes.Buffer) (graph.UnboundRelationship, error) { - rel := graph.UnboundRelationship{} - - relIdentityInt, err := d.decode(buffer) - if err != nil { - return rel, err - } - rel.RelIdentity = relIdentityInt.(int64) - - var ok bool - typeInt, err := d.decode(buffer) - if err != nil { - return rel, err - } - rel.Type, ok = typeInt.(string) - if !ok { - return rel, errors.New("Expected: Type string, but got %T %+v", typeInt, typeInt) - } - - propertiesInt, err := d.decode(buffer) - if err != nil { - return rel, err - } - rel.Properties, ok = propertiesInt.(map[string]interface{}) - if !ok { - return rel, errors.New("Expected: Properties map[string]interface{}, but got %T %+v", propertiesInt, propertiesInt) - } - - return rel, nil -} - -func (d Decoder) decodeRecordMessage(buffer *bytes.Buffer) (messages.RecordMessage, error) { - fieldsInt, err := d.decode(buffer) - if err != nil { - return messages.RecordMessage{}, err - } - fields, ok := fieldsInt.([]interface{}) - if !ok { - return messages.RecordMessage{}, errors.New("Expected: Fields []interface{}, but got %T %+v", fieldsInt, fieldsInt) - } - - return 
messages.NewRecordMessage(fields), nil -} - -func (d Decoder) decodeFailureMessage(buffer *bytes.Buffer) (messages.FailureMessage, error) { - metadataInt, err := d.decode(buffer) - if err != nil { - return messages.FailureMessage{}, err - } - metadata, ok := metadataInt.(map[string]interface{}) - if !ok { - return messages.FailureMessage{}, errors.New("Expected: Metadata map[string]interface{}, but got %T %+v", metadataInt, metadataInt) - } - - return messages.NewFailureMessage(metadata), nil -} - -func (d Decoder) decodeIgnoredMessage(buffer *bytes.Buffer) (messages.IgnoredMessage, error) { - return messages.NewIgnoredMessage(), nil -} - -func (d Decoder) decodeSuccessMessage(buffer *bytes.Buffer) (messages.SuccessMessage, error) { - metadataInt, err := d.decode(buffer) - if err != nil { - return messages.SuccessMessage{}, err - } - metadata, ok := metadataInt.(map[string]interface{}) - if !ok { - return messages.SuccessMessage{}, errors.New("Expected: Metadata map[string]interface{}, but got %T %+v", metadataInt, metadataInt) - } - - return messages.NewSuccessMessage(metadata), nil -} - -func (d Decoder) decodeAckFailureMessage(buffer *bytes.Buffer) (messages.AckFailureMessage, error) { - return messages.NewAckFailureMessage(), nil -} - -func (d Decoder) decodeDiscardAllMessage(buffer *bytes.Buffer) (messages.DiscardAllMessage, error) { - return messages.NewDiscardAllMessage(), nil -} - -func (d Decoder) decodePullAllMessage(buffer *bytes.Buffer) (messages.PullAllMessage, error) { - return messages.NewPullAllMessage(), nil -} - -func (d Decoder) decodeResetMessage(buffer *bytes.Buffer) (messages.ResetMessage, error) { - return messages.NewResetMessage(), nil -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/doc.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/doc.go deleted file mode 100644 index a21d9b90..00000000 --- 
a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -/*Package encoding is used to encode/decode data going to/from the bolt protocol.*/ -package encoding diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/encoder.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/encoder.go deleted file mode 100644 index 56752c8e..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/encoder.go +++ /dev/null @@ -1,468 +0,0 @@ -package encoding - -import ( - "encoding/binary" - "io" - "math" - - "bytes" - - "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures" -) - -const ( - // NilMarker represents the encoding marker byte for a nil object - NilMarker = 0xC0 - - // TrueMarker represents the encoding marker byte for a true boolean object - TrueMarker = 0xC3 - // FalseMarker represents the encoding marker byte for a false boolean object - FalseMarker = 0xC2 - - // Int8Marker represents the encoding marker byte for a int8 object - Int8Marker = 0xC8 - // Int16Marker represents the encoding marker byte for a int16 object - Int16Marker = 0xC9 - // Int32Marker represents the encoding marker byte for a int32 object - Int32Marker = 0xCA - // Int64Marker represents the encoding marker byte for a int64 object - Int64Marker = 0xCB - - // FloatMarker represents the encoding marker byte for a float32/64 object - FloatMarker = 0xC1 - - // TinyStringMarker represents the encoding marker byte for a string object - TinyStringMarker = 0x80 - // String8Marker represents the encoding marker byte for a string object - String8Marker = 0xD0 - // String16Marker represents the encoding marker byte for a string object - String16Marker = 0xD1 - // String32Marker represents the encoding marker byte for a string object - String32Marker = 0xD2 - - // 
TinySliceMarker represents the encoding marker byte for a slice object - TinySliceMarker = 0x90 - // Slice8Marker represents the encoding marker byte for a slice object - Slice8Marker = 0xD4 - // Slice16Marker represents the encoding marker byte for a slice object - Slice16Marker = 0xD5 - // Slice32Marker represents the encoding marker byte for a slice object - Slice32Marker = 0xD6 - - // TinyMapMarker represents the encoding marker byte for a map object - TinyMapMarker = 0xA0 - // Map8Marker represents the encoding marker byte for a map object - Map8Marker = 0xD8 - // Map16Marker represents the encoding marker byte for a map object - Map16Marker = 0xD9 - // Map32Marker represents the encoding marker byte for a map object - Map32Marker = 0xDA - - // TinyStructMarker represents the encoding marker byte for a struct object - TinyStructMarker = 0xB0 - // Struct8Marker represents the encoding marker byte for a struct object - Struct8Marker = 0xDC - // Struct16Marker represents the encoding marker byte for a struct object - Struct16Marker = 0xDD -) - -var ( - // EndMessage is the data to send to end a message - EndMessage = []byte{byte(0x00), byte(0x00)} -) - -// Encoder encodes objects of different types to the given stream. -// Attempts to support all builtin golang types, when it can be confidently -// mapped to a data type from: http://alpha.neohq.net/docs/server-manual/bolt-serialization.html#bolt-packstream-structures -// (version v3.1.0-M02 at the time of writing this. -// -// Maps and Slices are a special case, where only -// map[string]interface{} and []interface{} are supported. -// The interface for maps and slices may be more permissive in the future. 
-type Encoder struct { - w io.Writer - buf *bytes.Buffer - chunkSize uint16 -} - -// NewEncoder Creates a new Encoder object -func NewEncoder(w io.Writer, chunkSize uint16) Encoder { - return Encoder{ - w: w, - buf: &bytes.Buffer{}, - chunkSize: chunkSize, - } -} - -// Marshal is used to marshal an object to the bolt interface encoded bytes -func Marshal(v interface{}) ([]byte, error) { - x := &bytes.Buffer{} - err := NewEncoder(x, math.MaxUint16).Encode(v) - return x.Bytes(), err -} - -// write writes to the writer. Buffers the writes using chunkSize. -func (e Encoder) Write(p []byte) (n int, err error) { - - n, err = e.buf.Write(p) - if err != nil { - err = errors.Wrap(err, "An error occurred writing to encoder temp buffer") - return n, err - } - - length := e.buf.Len() - for length >= int(e.chunkSize) { - if err := binary.Write(e.w, binary.BigEndian, e.chunkSize); err != nil { - return 0, errors.Wrap(err, "An error occured writing chunksize") - } - - numWritten, err := e.w.Write(e.buf.Next(int(e.chunkSize))) - if err != nil { - err = errors.Wrap(err, "An error occured writing a chunk") - } - - return numWritten, err - } - - return n, nil -} - -// flush finishes the encoding stream by flushing it to the writer -func (e Encoder) flush() error { - length := e.buf.Len() - if length > 0 { - if err := binary.Write(e.w, binary.BigEndian, uint16(length)); err != nil { - return errors.Wrap(err, "An error occured writing length bytes during flush") - } - - if _, err := e.buf.WriteTo(e.w); err != nil { - return errors.Wrap(err, "An error occured writing message bytes during flush") - } - } - - _, err := e.w.Write(EndMessage) - if err != nil { - return errors.Wrap(err, "An error occurred ending encoding message") - } - e.buf.Reset() - - return nil -} - -// Encode encodes an object to the stream -func (e Encoder) Encode(iVal interface{}) error { - - err := e.encode(iVal) - if err != nil { - return err - } - - // Whatever is left in the buffer for the chunk at the end, write 
it out - return e.flush() -} - -// Encode encodes an object to the stream -func (e Encoder) encode(iVal interface{}) error { - - var err error - switch val := iVal.(type) { - case nil: - err = e.encodeNil() - case bool: - err = e.encodeBool(val) - case int: - err = e.encodeInt(int64(val)) - case int8: - err = e.encodeInt(int64(val)) - case int16: - err = e.encodeInt(int64(val)) - case int32: - err = e.encodeInt(int64(val)) - case int64: - err = e.encodeInt(val) - case uint: - err = e.encodeInt(int64(val)) - case uint8: - err = e.encodeInt(int64(val)) - case uint16: - err = e.encodeInt(int64(val)) - case uint32: - err = e.encodeInt(int64(val)) - case uint64: - if val > math.MaxInt64 { - return errors.New("Integer too big: %d. Max integer supported: %d", val, int64(math.MaxInt64)) - } - err = e.encodeInt(int64(val)) - case float32: - err = e.encodeFloat(float64(val)) - case float64: - err = e.encodeFloat(val) - case string: - err = e.encodeString(val) - case []interface{}: - err = e.encodeSlice(val) - case map[string]interface{}: - err = e.encodeMap(val) - case structures.Structure: - err = e.encodeStructure(val) - default: - return errors.New("Unrecognized type when encoding data for Bolt transport: %T %+v", val, val) - } - - return err -} - -func (e Encoder) encodeNil() error { - _, err := e.Write([]byte{NilMarker}) - return err -} - -func (e Encoder) encodeBool(val bool) error { - var err error - if val { - _, err = e.Write([]byte{TrueMarker}) - } else { - _, err = e.Write([]byte{FalseMarker}) - } - return err -} - -func (e Encoder) encodeInt(val int64) error { - var err error - switch { - case val >= math.MinInt64 && val < math.MinInt32: - // Write as INT_64 - if _, err = e.Write([]byte{Int64Marker}); err != nil { - return err - } - err = binary.Write(e, binary.BigEndian, val) - case val >= math.MinInt32 && val < math.MinInt16: - // Write as INT_32 - if _, err = e.Write([]byte{Int32Marker}); err != nil { - return err - } - err = binary.Write(e, binary.BigEndian, 
int32(val)) - case val >= math.MinInt16 && val < math.MinInt8: - // Write as INT_16 - if _, err = e.Write([]byte{Int16Marker}); err != nil { - return err - } - err = binary.Write(e, binary.BigEndian, int16(val)) - case val >= math.MinInt8 && val < -16: - // Write as INT_8 - if _, err = e.Write([]byte{Int8Marker}); err != nil { - return err - } - err = binary.Write(e, binary.BigEndian, int8(val)) - case val >= -16 && val <= math.MaxInt8: - // Write as TINY_INT - err = binary.Write(e, binary.BigEndian, int8(val)) - case val > math.MaxInt8 && val <= math.MaxInt16: - // Write as INT_16 - if _, err = e.Write([]byte{Int16Marker}); err != nil { - return err - } - err = binary.Write(e, binary.BigEndian, int16(val)) - case val > math.MaxInt16 && val <= math.MaxInt32: - // Write as INT_32 - if _, err = e.Write([]byte{Int32Marker}); err != nil { - return err - } - err = binary.Write(e, binary.BigEndian, int32(val)) - case val > math.MaxInt32 && val <= math.MaxInt64: - // Write as INT_64 - if _, err = e.Write([]byte{Int64Marker}); err != nil { - return err - } - err = binary.Write(e, binary.BigEndian, val) - default: - return errors.New("Int too long to write: %d", val) - } - if err != nil { - return errors.Wrap(err, "An error occured writing an int to bolt") - } - return err -} - -func (e Encoder) encodeFloat(val float64) error { - if _, err := e.Write([]byte{FloatMarker}); err != nil { - return err - } - - err := binary.Write(e, binary.BigEndian, val) - if err != nil { - return errors.Wrap(err, "An error occured writing a float to bolt") - } - - return err -} - -func (e Encoder) encodeString(val string) error { - var err error - bytes := []byte(val) - - length := len(bytes) - switch { - case length <= 15: - if _, err = e.Write([]byte{byte(TinyStringMarker + length)}); err != nil { - return err - } - _, err = e.Write(bytes) - case length > 15 && length <= math.MaxUint8: - if _, err = e.Write([]byte{String8Marker}); err != nil { - return err - } - if err = binary.Write(e, 
binary.BigEndian, int8(length)); err != nil { - return err - } - _, err = e.Write(bytes) - case length > math.MaxUint8 && length <= math.MaxUint16: - if _, err = e.Write([]byte{String16Marker}); err != nil { - return err - } - if err = binary.Write(e, binary.BigEndian, int16(length)); err != nil { - return err - } - _, err = e.Write(bytes) - case length > math.MaxUint16 && int64(length) <= math.MaxUint32: - if _, err = e.Write([]byte{String32Marker}); err != nil { - return err - } - if err = binary.Write(e, binary.BigEndian, int32(length)); err != nil { - return err - } - _, err = e.Write(bytes) - default: - return errors.New("String too long to write: %s", val) - } - return err -} - -func (e Encoder) encodeSlice(val []interface{}) error { - length := len(val) - switch { - case length <= 15: - if _, err := e.Write([]byte{byte(TinySliceMarker + length)}); err != nil { - return err - } - case length > 15 && length <= math.MaxUint8: - if _, err := e.Write([]byte{Slice8Marker}); err != nil { - return err - } - if err := binary.Write(e, binary.BigEndian, int8(length)); err != nil { - return err - } - case length > math.MaxUint8 && length <= math.MaxUint16: - if _, err := e.Write([]byte{Slice16Marker}); err != nil { - return err - } - if err := binary.Write(e, binary.BigEndian, int16(length)); err != nil { - return err - } - case length >= math.MaxUint16 && int64(length) <= math.MaxUint32: - if _, err := e.Write([]byte{Slice32Marker}); err != nil { - return err - } - if err := binary.Write(e, binary.BigEndian, int32(length)); err != nil { - return err - } - default: - return errors.New("Slice too long to write: %+v", val) - } - - // Encode Slice values - for _, item := range val { - if err := e.encode(item); err != nil { - return err - } - } - - return nil -} - -func (e Encoder) encodeMap(val map[string]interface{}) error { - length := len(val) - switch { - case length <= 15: - if _, err := e.Write([]byte{byte(TinyMapMarker + length)}); err != nil { - return err - } - 
case length > 15 && length <= math.MaxUint8: - if _, err := e.Write([]byte{Map8Marker}); err != nil { - return err - } - if err := binary.Write(e, binary.BigEndian, int8(length)); err != nil { - return err - } - case length > math.MaxUint8 && length <= math.MaxUint16: - if _, err := e.Write([]byte{Map16Marker}); err != nil { - return err - } - if err := binary.Write(e, binary.BigEndian, int16(length)); err != nil { - return err - } - case length >= math.MaxUint16 && int64(length) <= math.MaxUint32: - if _, err := e.Write([]byte{Map32Marker}); err != nil { - return err - } - if err := binary.Write(e, binary.BigEndian, int32(length)); err != nil { - return err - } - default: - return errors.New("Map too long to write: %+v", val) - } - - // Encode Map values - for k, v := range val { - if err := e.encode(k); err != nil { - return err - } - if err := e.encode(v); err != nil { - return err - } - } - - return nil -} - -func (e Encoder) encodeStructure(val structures.Structure) error { - - fields := val.AllFields() - length := len(fields) - switch { - case length <= 15: - if _, err := e.Write([]byte{byte(TinyStructMarker + length)}); err != nil { - return err - } - case length > 15 && length <= math.MaxUint8: - if _, err := e.Write([]byte{Struct8Marker}); err != nil { - return err - } - if err := binary.Write(e, binary.BigEndian, int8(length)); err != nil { - return err - } - case length > math.MaxUint8 && length <= math.MaxUint16: - if _, err := e.Write([]byte{Struct16Marker}); err != nil { - return err - } - if err := binary.Write(e, binary.BigEndian, int16(length)); err != nil { - return err - } - default: - return errors.New("Structure too long to write: %+v", val) - } - - _, err := e.Write([]byte{byte(val.Signature())}) - if err != nil { - return errors.Wrap(err, "An error occurred writing to encoder a struct field") - } - - for _, field := range fields { - if err := e.encode(field); err != nil { - return errors.Wrap(err, "An error occurred encoding a struct field") 
- } - } - - return nil -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/util.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/util.go deleted file mode 100644 index 1b219325..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding/util.go +++ /dev/null @@ -1,62 +0,0 @@ -package encoding - -import ( - "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph" -) - -func sliceInterfaceToString(from []interface{}) ([]string, error) { - to := make([]string, len(from)) - for idx, item := range from { - toItem, ok := item.(string) - if !ok { - return nil, errors.New("Expected string value. Got %T %+v", item, item) - } - to[idx] = toItem - } - return to, nil -} - -func sliceInterfaceToInt(from []interface{}) ([]int, error) { - to := make([]int, len(from)) - for idx, item := range from { - to[idx] = int(item.(int64)) - } - return to, nil -} - -func sliceInterfaceToNode(from []interface{}) ([]graph.Node, error) { - to := make([]graph.Node, len(from)) - for idx, item := range from { - toItem, ok := item.(graph.Node) - if !ok { - return nil, errors.New("Expected Node value. Got %T %+v", item, item) - } - to[idx] = toItem - } - return to, nil -} - -func sliceInterfaceToRelationship(from []interface{}) ([]graph.Relationship, error) { - to := make([]graph.Relationship, len(from)) - for idx, item := range from { - toItem, ok := item.(graph.Relationship) - if !ok { - return nil, errors.New("Expected Relationship value. 
Got %T %+v", item, item) - } - to[idx] = toItem - } - return to, nil -} - -func sliceInterfaceToUnboundRelationship(from []interface{}) ([]graph.UnboundRelationship, error) { - to := make([]graph.UnboundRelationship, len(from)) - for idx, item := range from { - toItem, ok := item.(graph.UnboundRelationship) - if !ok { - return nil, errors.New("Expected UnboundRelationship value. Got %T %+v", item, item) - } - to[idx] = toItem - } - return to, nil -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/errors/doc.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/errors/doc.go deleted file mode 100644 index 4d9344ad..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/errors/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -/*Package errors contains the errors used by the bolt driver. Implements wrapped errors and stack traces*/ -package errors diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/errors/errors.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/errors/errors.go deleted file mode 100644 index 3d5a48ff..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/errors/errors.go +++ /dev/null @@ -1,80 +0,0 @@ -package errors - -import ( - "fmt" - "runtime/debug" - "strings" -) - -// Error is the base error type adds stack trace and wrapping errors -type Error struct { - msg string - wrapped error - stack []byte - level int -} - -// New makes a new error -func New(msg string, args ...interface{}) *Error { - return &Error{ - msg: fmt.Sprintf(msg, args...), - stack: debug.Stack(), - level: 0, - } -} - -// Wrap wraps an error with a new error -func Wrap(err error, msg string, args ...interface{}) *Error { - if e, ok := err.(*Error); ok { - return &Error{ - msg: fmt.Sprintf(msg, args...), - wrapped: e, - } - } - - return &Error{ - msg: fmt.Sprintf(msg, args...), - wrapped: err, - stack: debug.Stack(), - } -} - -// Error 
gets the error output -func (e *Error) Error() string { - return e.error(0) -} - -// Inner returns the inner error wrapped by this error -func (e *Error) Inner() error { - return e.wrapped -} - -// InnerMost returns the innermost error wrapped by this error -func (e *Error) InnerMost() error { - if e.wrapped == nil { - return e - } - - if inner, ok := e.wrapped.(*Error); ok { - return inner.InnerMost() - } - - return e.wrapped -} - -func (e *Error) error(level int) string { - msg := fmt.Sprintf("%s%s", strings.Repeat("\t", level), e.msg) - if e.wrapped != nil { - if wrappedErr, ok := e.wrapped.(*Error); ok { - msg += fmt.Sprintf("\n%s", wrappedErr.error(level+1)) - } else { - msg += fmt.Sprintf("\nInternal Error(%T):%s", e.wrapped, e.wrapped.Error()) - } - } - - if len(e.stack) > 0 { - msg += fmt.Sprintf("\n\n Stack Trace:\n\n%s", e.stack) - } - - return msg -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/log/doc.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/log/doc.go deleted file mode 100644 index 08aa0347..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/log/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -/*Package log implements the logging for the bolt driver - -There are 3 logging levels - trace, info and error. Setting trace would also set info and error logs. -You can use the SetLevel("trace") to set trace logging, for example. 
-*/ -package log diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/log/log.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/log/log.go deleted file mode 100644 index dc99a683..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/log/log.go +++ /dev/null @@ -1,112 +0,0 @@ -package log - -import ( - l "log" - "os" - "strings" -) - -// Level is the logging level -type Level int - -const ( - // NoneLevel is no logging - NoneLevel Level = iota - // ErrorLevel is error logging - ErrorLevel Level = iota - // InfoLevel is info logging - InfoLevel Level = iota - // TraceLevel is trace logging - TraceLevel Level = iota -) - -var ( - level = NoneLevel - // ErrorLog is the logger for error logging. This can be manually overridden. - ErrorLog = l.New(os.Stderr, "[BOLT][ERROR]", l.LstdFlags) - // InfoLog is the logger for info logging. This can be manually overridden. - InfoLog = l.New(os.Stderr, "[BOLT][INFO]", l.LstdFlags) - // TraceLog is the logger for trace logging. This can be manually overridden. - TraceLog = l.New(os.Stderr, "[BOLT][TRACE]", l.LstdFlags) -) - -// SetLevel sets the logging level of this package. levelStr should be one of "trace", "info", or "error -func SetLevel(levelStr string) { - switch strings.ToLower(levelStr) { - case "trace": - level = TraceLevel - case "info": - level = InfoLevel - case "error": - level = ErrorLevel - default: - level = NoneLevel - } -} - -// GetLevel gets the logging level -func GetLevel() Level { - return level -} - -// Trace writes a trace log in the format of Println -func Trace(args ...interface{}) { - if level >= TraceLevel { - TraceLog.Println(args...) - } -} - -// Tracef writes a trace log in the format of Printf -func Tracef(msg string, args ...interface{}) { - if level >= TraceLevel { - TraceLog.Printf(msg, args...) 
- } -} - -// Info writes an info log in the format of Println -func Info(args ...interface{}) { - if level >= InfoLevel { - InfoLog.Println(args...) - } -} - -// Infof writes an info log in the format of Printf -func Infof(msg string, args ...interface{}) { - if level >= InfoLevel { - InfoLog.Printf(msg, args...) - } -} - -// Error writes an error log in the format of Println -func Error(args ...interface{}) { - if level >= ErrorLevel { - ErrorLog.Println(args...) - } -} - -// Errorf writes an error log in the format of Printf -func Errorf(msg string, args ...interface{}) { - if level >= ErrorLevel { - ErrorLog.Printf(msg, args...) - } -} - -// Fatal writes an error log in the format of Fatalln -func Fatal(args ...interface{}) { - l.Fatalln(args...) -} - -// Fatalf writes an error log in the format of Fatalf -func Fatalf(msg string, args ...interface{}) { - l.Fatalf(msg, args...) -} - -// Panic writes an error log in the format of Panicln -func Panic(args ...interface{}) { - l.Panicln(args...) -} - -// Panicf writes an error log in the format of Panicf -func Panicf(msg string, args ...interface{}) { - l.Panicf(msg, args...) -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/recorder.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/recorder.go deleted file mode 100644 index 41ba2ca8..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/recorder.go +++ /dev/null @@ -1,291 +0,0 @@ -package golangNeo4jBoltDriver - -import ( - "bytes" - "encoding/json" - "fmt" - "net" - "os" - "time" - - "github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/log" -) - -// recorder records a given session with Neo4j. 
-// allows for playback of sessions as well -type recorder struct { - net.Conn - name string - events []*Event - connStr string - currentEvent int -} - -func newRecorder(name string, connStr string) *recorder { - r := &recorder{ - name: name, - connStr: connStr, - } - - if r.connStr == "" { - if err := r.load(r.name); err != nil { - log.Fatalf("Couldn't load data from recording files!: %s", err) - } - } - - return r -} - -func (r *recorder) completedLast() bool { - event := r.lastEvent() - if event == nil { - return true - } - - return event.Completed -} - -func (r *recorder) lastEvent() *Event { - if len(r.events) > 0 { - return r.events[len(r.events)-1] - } - return nil -} - -// Read from the net conn, recording the interaction -func (r *recorder) Read(b []byte) (n int, err error) { - if r.Conn != nil { - numRead, err := r.Conn.Read(b) - if numRead > 0 { - r.record(b[:numRead], false) - } - - if err != nil { - r.recordErr(err, false) - } - - return numRead, err - } - - if r.currentEvent >= len(r.events) { - return 0, errors.New("Trying to read past all of the events in the recorder! %#v", r) - } - event := r.events[r.currentEvent] - if event.IsWrite { - return 0, errors.New("Recorder expected Read, got Write! %#v, Event: %#v", r, event) - } - - for i := 0; i < len(b); i++ { - if len(event.Event) == 0 { - return i, errors.New("Attempted to read past current event in recorder! Bytes: %s. Recorder %#v, Event; %#v", b, r, event) - } - b[i] = event.Event[0] - event.Event = event.Event[1:] - } - - if len(event.Event) == 0 { - r.currentEvent++ - } - - return len(b), nil -} - -// Close the net conn, outputting the recording -func (r *recorder) Close() error { - if r.Conn != nil { - err := r.flush() - if err != nil { - return err - } - return r.Conn.Close() - } else if len(r.events) > 0 { - if r.currentEvent != len(r.events) { - return errors.New("Didn't read all of the events in the recorder on close! 
%#v", r) - } - - if len(r.events[len(r.events)-1].Event) != 0 { - return errors.New("Left data in an event in the recorder on close! %#v", r) - } - - return nil - } - - return nil -} - -// Write to the net conn, recording the interaction -func (r *recorder) Write(b []byte) (n int, err error) { - if r.Conn != nil { - numWritten, err := r.Conn.Write(b) - if numWritten > 0 { - r.record(b[:numWritten], true) - } - - if err != nil { - r.recordErr(err, true) - } - - return numWritten, err - } - - if r.currentEvent >= len(r.events) { - return 0, errors.New("Trying to write past all of the events in the recorder! %#v", r) - } - event := r.events[r.currentEvent] - if !event.IsWrite { - return 0, errors.New("Recorder expected Write, got Read! %#v, Event: %#v", r, event) - } - - for i := 0; i < len(b); i++ { - if len(event.Event) == 0 { - return i, errors.New("Attempted to write past current event in recorder! %#v, Event: %#v", r, event) - } - event.Event = event.Event[1:] - } - - if len(event.Event) == 0 { - r.currentEvent++ - } - - return len(b), nil -} - -func (r *recorder) record(data []byte, isWrite bool) { - event := r.lastEvent() - if event == nil || event.Completed || event.IsWrite != isWrite { - event = newEvent(isWrite) - r.events = append(r.events, event) - } - - event.Event = append(event.Event, data...) 
- if data[len(data)-2] == byte(0x00) && data[len(data)-1] == byte(0x00) { - event.Completed = true - } -} - -func (r *recorder) recordErr(err error, isWrite bool) { - event := r.lastEvent() - if event == nil || event.Completed || event.IsWrite != isWrite { - event = newEvent(isWrite) - r.events = append(r.events, event) - } - - event.Error = err - event.Completed = true -} - -func (r *recorder) load(name string) error { - file, err := os.OpenFile("./recordings/"+name+".json", os.O_RDONLY, 0660) - if err != nil { - return err - } - - return json.NewDecoder(file).Decode(&r.events) -} - -func (r *recorder) writeRecording() error { - file, err := os.OpenFile("./recordings/"+r.name+".json", os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660) - if err != nil { - return err - } - return json.NewEncoder(file).Encode(r.events) -} - -func (r *recorder) flush() error { - if os.Getenv("RECORD_OUTPUT") == "" { - return nil - } - return r.writeRecording() -} - -func (r *recorder) print() { - fmt.Println("PRINTING RECORDING " + r.name) - - for _, event := range r.events { - - fmt.Println() - fmt.Println() - - typee := "READ" - if event.IsWrite { - typee = "WRITE" - } - fmt.Printf("%s @ %d:\n\n", typee, event.Timestamp) - - decoded, err := encoding.NewDecoder(bytes.NewBuffer(event.Event)).Decode() - if err != nil { - fmt.Printf("Error decoding data! 
Error: %s\n", err) - } else { - fmt.Printf("Decoded Data:\n\n%+v\n\n", decoded) - } - - fmt.Print("Encoded Bytes:\n\n") - fmt.Print(sprintByteHex(event.Event)) - if !event.Completed { - fmt.Println("EVENT NEVER COMPLETED!!!!!!!!!!!!!!!") - } - - if event.Error != nil { - fmt.Printf("ERROR OCCURRED DURING EVENT!!!!!!!\n\nError: %s\n", event.Error) - } - - fmt.Println() - fmt.Println() - } - - fmt.Println("RECORDING END " + r.name) -} - -func (r *recorder) LocalAddr() net.Addr { - if r.Conn != nil { - return r.Conn.LocalAddr() - } - return nil -} - -func (r *recorder) RemoteAddr() net.Addr { - if r.Conn != nil { - return r.Conn.RemoteAddr() - } - return nil -} - -func (r *recorder) SetDeadline(t time.Time) error { - if r.Conn != nil { - return r.Conn.SetDeadline(t) - } - return nil -} - -func (r *recorder) SetReadDeadline(t time.Time) error { - if r.Conn != nil { - return r.Conn.SetReadDeadline(t) - } - return nil -} - -func (r *recorder) SetWriteDeadline(t time.Time) error { - if r.Conn != nil { - return r.Conn.SetWriteDeadline(t) - } - return nil -} - -// Event represents a single recording (read or write) event in the recorder -type Event struct { - Timestamp int64 `json:"-"` - Event []byte - IsWrite bool - Completed bool - Error error -} - -func newEvent(isWrite bool) *Event { - return &Event{ - Timestamp: time.Now().UnixNano(), - Event: []byte{}, - IsWrite: isWrite, - } -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/result.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/result.go deleted file mode 100644 index c37a9212..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/result.go +++ /dev/null @@ -1,69 +0,0 @@ -package golangNeo4jBoltDriver - -import "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" - -// Result represents a result from a query that returns no data -type Result interface { - // LastInsertId Always returns -1. 
This is necessary - // to meet the sql.driver interface - LastInsertId() (int64, error) - // RowsAffected returns the number of rows affected - // This doesn't currently support updates, only - // inserts/deletions - RowsAffected() (int64, error) - // Metadata returns the metadata response from neo4j - Metadata() map[string]interface{} -} - -type boltResult struct { - metadata map[string]interface{} -} - -func newResult(metadata map[string]interface{}) boltResult { - return boltResult{metadata: metadata} -} - -// Returns the response metadata from the bolt success message -func (r boltResult) Metadata() map[string]interface{} { - return r.metadata -} - -// LastInsertId gets the last inserted id. This will always return -1. -func (r boltResult) LastInsertId() (int64, error) { - // TODO: Is this possible? - return -1, nil -} - -// RowsAffected returns the number of nodes+rels created/deleted. For reasons of limitations -// on the API, we cannot tell how many nodes+rels were updated, only how many properties were -// updated. If this changes in the future, number updated will be added to the output of this -// interface. 
-func (r boltResult) RowsAffected() (int64, error) { - stats, ok := r.metadata["stats"].(map[string]interface{}) - if !ok { - return -1, errors.New("Unrecognized type for stats metadata: %#v", r.metadata) - } - - var rowsAffected int64 - nodesCreated, ok := stats["nodes-created"] - if ok { - rowsAffected += nodesCreated.(int64) - } - - relsCreated, ok := stats["relationships-created"] - if ok { - rowsAffected += relsCreated.(int64) - } - - nodesDeleted, ok := stats["nodes-deleted"] - if ok { - rowsAffected += nodesDeleted.(int64) - } - - relsDeleted, ok := stats["relationships-deleted"] - if ok { - rowsAffected += relsDeleted.(int64) - } - - return rowsAffected, nil -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/rows.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/rows.go deleted file mode 100644 index 4eb67822..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/rows.go +++ /dev/null @@ -1,289 +0,0 @@ -package golangNeo4jBoltDriver - -import ( - "database/sql/driver" - "io" - - "github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/log" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages" -) - -// Rows represents results of rows from the DB -// -// Row objects ARE NOT THREAD SAFE. -// If you want to use multiple go routines with these objects, -// you should use a driver to create a new conn for each routine. 
-type Rows interface { - // Columns Gets the names of the columns in the returned dataset - Columns() []string - // Metadata Gets all of the metadata returned from Neo on query start - Metadata() map[string]interface{} - // Close the rows, flushing any existing datastream - Close() error - // NextNeo gets the next row result - // When the rows are completed, returns the success metadata - // and io.EOF - NextNeo() ([]interface{}, map[string]interface{}, error) - // All gets all of the results from the row set. It's recommended to use NextNeo when - // there are a lot of rows - All() ([][]interface{}, map[string]interface{}, error) -} - -// PipelineRows represents results of a set of rows from the DB -// when running a pipeline statement. -// -// Row objects ARE NOT THREAD SAFE. -// If you want to use multiple go routines with these objects, -// you should use a driver to create a new conn for each routine. -type PipelineRows interface { - // Columns Gets the names of the columns in the returned dataset - Columns() []string - // Metadata Gets all of the metadata returned from Neo on query start - Metadata() map[string]interface{} - // Close the rows, flushing any existing datastream - Close() error - // NextPipeline gets the next row result - // When the rows are completed, returns the success metadata and the next - // set of rows. 
- // When all rows are completed, returns io.EOF - NextPipeline() ([]interface{}, map[string]interface{}, PipelineRows, error) -} - -type boltRows struct { - metadata map[string]interface{} - statement *boltStmt - closed bool - consumed bool - finishedConsume bool - pipelineIndex int - closeStatement bool -} - -func newRows(statement *boltStmt, metadata map[string]interface{}) *boltRows { - return &boltRows{ - statement: statement, - metadata: metadata, - } -} - -func newQueryRows(statement *boltStmt, metadata map[string]interface{}) *boltRows { - rows := newRows(statement, metadata) - rows.consumed = true // Already consumed from pipeline with PULL_ALL - rows.closeStatement = true // Query rows don't expose a statement, so they need to close the statement when they close - return rows -} - -func newPipelineRows(statement *boltStmt, metadata map[string]interface{}, pipelineIndex int) *boltRows { - rows := newRows(statement, metadata) - rows.consumed = true // Already consumed from pipeline with PULL_ALL - rows.pipelineIndex = pipelineIndex - return rows -} - -func newQueryPipelineRows(statement *boltStmt, metadata map[string]interface{}, pipelineIndex int) *boltRows { - rows := newPipelineRows(statement, metadata, pipelineIndex) - rows.closeStatement = true // Query rows don't expose a statement, so they need to close the statement when they close - return rows -} - -// Columns returns the columns from the result -func (r *boltRows) Columns() []string { - fieldsInt, ok := r.metadata["fields"] - if !ok { - return []string{} - } - - fields, ok := fieldsInt.([]interface{}) - if !ok { - log.Errorf("Unrecognized fields from success message: %#v", fieldsInt) - return []string{} - } - - fieldsStr := make([]string, len(fields)) - for i, f := range fields { - if fieldsStr[i], ok = f.(string); !ok { - log.Errorf("Unrecognized fields from success message: %#v", fieldsInt) - return []string{} - } - } - return fieldsStr -} - -// Metadata Gets all of the metadata returned from 
Neo on query start -func (r *boltRows) Metadata() map[string]interface{} { - return r.metadata -} - -// Close closes the rows -func (r *boltRows) Close() error { - if r.closed { - return nil - } - - if !r.consumed { - // Discard all messages if not consumed - respInt, err := r.statement.conn.sendDiscardAllConsume() - if err != nil { - return errors.Wrap(err, "An error occurred discarding messages on row close") - } - - switch resp := respInt.(type) { - case messages.SuccessMessage: - log.Infof("Got success message: %#v", resp) - default: - return errors.New("Unrecognized response type discarding all rows: Value: %#v", resp) - } - - } else if !r.finishedConsume { - // If this is a pipeline statement, we need to "consume all" multiple times - numConsume := 1 - if r.statement.queries != nil { - numQueries := len(r.statement.queries) - if numQueries > 0 { - // So, every pipeline statement has two successes - // but by the time you get to the row object, one has - // been consumed. Hence we need to clear out the - // rest of the messages on close by taking the current - // index * 2 but removing the first success - numConsume = ((numQueries - r.pipelineIndex) * 2) - 1 - } - } - - // Clear out all unconsumed messages if we - // never finished consuming them. 
- _, _, err := r.statement.conn.consumeAllMultiple(numConsume) - if err != nil { - return errors.Wrap(err, "An error occurred clearing out unconsumed stream") - } - } - - r.closed = true - r.statement.rows = nil - - if r.closeStatement { - return r.statement.Close() - } - return nil -} - -// Next gets the next row result -func (r *boltRows) Next(dest []driver.Value) error { - data, _, err := r.NextNeo() - if err != nil { - return err - } - - for i, item := range data { - switch item := item.(type) { - case []interface{}, map[string]interface{}, graph.Node, graph.Path, graph.Relationship, graph.UnboundRelationship: - dest[i], err = encoding.Marshal(item) - if err != nil { - return err - } - default: - dest[i], err = driver.DefaultParameterConverter.ConvertValue(item) - if err != nil { - return err - } - } - } - - return nil - -} - -// NextNeo gets the next row result -// When the rows are completed, returns the success metadata -// and io.EOF -func (r *boltRows) NextNeo() ([]interface{}, map[string]interface{}, error) { - if r.closed { - return nil, nil, errors.New("Rows are already closed") - } - - if !r.consumed { - r.consumed = true - if err := r.statement.conn.sendPullAll(); err != nil { - r.finishedConsume = true - return nil, nil, err - } - } - - respInt, err := r.statement.conn.consume() - if err != nil { - return nil, nil, err - } - - switch resp := respInt.(type) { - case messages.SuccessMessage: - log.Infof("Got success message: %#v", resp) - r.finishedConsume = true - return nil, resp.Metadata, io.EOF - case messages.RecordMessage: - log.Infof("Got record message: %#v", resp) - return resp.Fields, nil, nil - default: - return nil, nil, errors.New("Unrecognized response type getting next query row: %#v", resp) - } -} - -func (r *boltRows) All() ([][]interface{}, map[string]interface{}, error) { - output := [][]interface{}{} - for { - row, metadata, err := r.NextNeo() - if err != nil || row == nil { - if err == io.EOF { - return output, metadata, nil - } - 
return output, metadata, err - } - output = append(output, row) - } -} - -// NextPipeline gets the next row result -// When the rows are completed, returns the success metadata and the next -// set of rows. -// When all rows are completed, returns io.EOF -func (r *boltRows) NextPipeline() ([]interface{}, map[string]interface{}, PipelineRows, error) { - if r.closed { - return nil, nil, nil, errors.New("Rows are already closed") - } - - respInt, err := r.statement.conn.consume() - if err != nil { - return nil, nil, nil, err - } - - switch resp := respInt.(type) { - case messages.SuccessMessage: - log.Infof("Got success message: %#v", resp) - - if r.pipelineIndex == len(r.statement.queries)-1 { - r.finishedConsume = true - return nil, nil, nil, err - } - - successResp, err := r.statement.conn.consume() - if err != nil && err != io.EOF { - return nil, nil, nil, errors.Wrap(err, "An error occurred getting next set of rows from pipeline command: %#v", successResp) - } - - success, ok := successResp.(messages.SuccessMessage) - if !ok { - return nil, nil, nil, errors.New("Unexpected response getting next set of rows from pipeline command: %#v", successResp) - } - - r.statement.rows = newPipelineRows(r.statement, success.Metadata, r.pipelineIndex+1) - r.statement.rows.closeStatement = r.closeStatement - return nil, success.Metadata, r.statement.rows, nil - - case messages.RecordMessage: - log.Infof("Got record message: %#v", resp) - return resp.Fields, nil, nil, nil - default: - return nil, nil, nil, errors.New("Unrecognized response type getting next pipeline row: %#v", resp) - } -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/stmt.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/stmt.go deleted file mode 100644 index 42a6f321..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/stmt.go +++ /dev/null @@ -1,243 +0,0 @@ -package golangNeo4jBoltDriver - -import ( - "database/sql/driver" - 
- "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/log" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages" -) - -// Stmt represents a statement to run against the database -// -// Stmt objects, and any rows prepared within ARE NOT -// THREAD SAFE. If you want to use multiple go routines with these objects, -// you should use a driver to create a new conn for each routine. -type Stmt interface { - // Close Closes the statement. See sql/driver.Stmt. - Close() error - // ExecNeo executes a query that returns no rows. Implements a Neo-friendly alternative to sql/driver. - ExecNeo(params map[string]interface{}) (Result, error) - // QueryNeo executes a query that returns data. Implements a Neo-friendly alternative to sql/driver. - QueryNeo(params map[string]interface{}) (Rows, error) -} - -// PipelineStmt represents a set of statements to run against the database -// -// PipelineStmt objects, and any rows prepared within ARE NOT -// THREAD SAFE. If you want to use multiple go routines with these objects, -// you should use a driver to create a new conn for each routine. -type PipelineStmt interface { - // Close Closes the statement. See sql/driver.Stmt. - Close() error - // ExecPipeline executes a set of queries that returns no rows. - ExecPipeline(params ...map[string]interface{}) ([]Result, error) - // QueryPipeline executes a set of queries that return data. - // Implements a Neo-friendly alternative to sql/driver. - QueryPipeline(params ...map[string]interface{}) (PipelineRows, error) -} - -type boltStmt struct { - queries []string - query string - conn *boltConn - closed bool - rows *boltRows -} - -func newStmt(query string, conn *boltConn) *boltStmt { - return &boltStmt{query: query, conn: conn} -} - -func newPipelineStmt(queries []string, conn *boltConn) *boltStmt { - return &boltStmt{queries: queries, conn: conn} -} - -// Close Closes the statement. 
See sql/driver.Stmt. -func (s *boltStmt) Close() error { - if s.closed { - return nil - } - - if s.rows != nil && !s.rows.closeStatement { - if err := s.rows.Close(); err != nil { - return err - } - } - - s.closed = true - s.conn.statement = nil - s.conn = nil - return nil -} - -// NumInput returns the number of placeholder parameters. See sql/driver.Stmt. -// Currently will always return -1 -func (s *boltStmt) NumInput() int { - return -1 // TODO: would need a cypher parser for this. disable for now -} - -// Exec executes a query that returns no rows. See sql/driver.Stmt. -// You must bolt encode a map to pass as []bytes for the driver value -func (s *boltStmt) Exec(args []driver.Value) (driver.Result, error) { - params, err := driverArgsToMap(args) - if err != nil { - return nil, err - } - return s.ExecNeo(params) -} - -// ExecNeo executes a query that returns no rows. Implements a Neo-friendly alternative to sql/driver. -func (s *boltStmt) ExecNeo(params map[string]interface{}) (Result, error) { - if s.closed { - return nil, errors.New("Neo4j Bolt statement already closed") - } - if s.rows != nil { - return nil, errors.New("Another query is already open") - } - - runResp, pullResp, _, err := s.conn.sendRunPullAllConsumeAll(s.query, params) - if err != nil { - return nil, err - } - - success, ok := runResp.(messages.SuccessMessage) - if !ok { - return nil, errors.New("Unrecognized response type when running exec query: %#v", success) - - } - - log.Infof("Got run success message: %#v", success) - - success, ok = pullResp.(messages.SuccessMessage) - if !ok { - return nil, errors.New("Unrecognized response when discarding exec rows: %#v", success) - } - - log.Infof("Got discard all success message: %#v", success) - - return newResult(success.Metadata), nil -} - -func (s *boltStmt) ExecPipeline(params ...map[string]interface{}) ([]Result, error) { - if s.closed { - return nil, errors.New("Neo4j Bolt statement already closed") - } - if s.rows != nil { - return nil, 
errors.New("Another query is already open") - } - - if len(params) != len(s.queries) { - return nil, errors.New("Must pass same number of params as there are queries") - } - - for i, query := range s.queries { - err := s.conn.sendRunPullAll(query, params[i]) - if err != nil { - return nil, errors.Wrap(err, "Error running exec query:\n\n%s\n\nWith Params:\n%#v", query, params[i]) - } - } - - log.Info("Successfully ran all pipeline queries") - - results := make([]Result, len(s.queries)) - for i := range s.queries { - runResp, err := s.conn.consume() - if err != nil { - return nil, errors.Wrap(err, "An error occurred getting result of exec command: %#v", runResp) - } - - success, ok := runResp.(messages.SuccessMessage) - if !ok { - return nil, errors.New("Unexpected response when getting exec query result: %#v", runResp) - } - - _, pullResp, err := s.conn.consumeAll() - if err != nil { - return nil, errors.Wrap(err, "An error occurred getting result of exec discard command: %#v", pullResp) - } - - success, ok = pullResp.(messages.SuccessMessage) - if !ok { - return nil, errors.New("Unexpected response when getting exec query discard result: %#v", pullResp) - } - - results[i] = newResult(success.Metadata) - - } - - return results, nil -} - -// Query executes a query that returns data. See sql/driver.Stmt. -// You must bolt encode a map to pass as []bytes for the driver value -func (s *boltStmt) Query(args []driver.Value) (driver.Rows, error) { - params, err := driverArgsToMap(args) - if err != nil { - return nil, err - } - return s.queryNeo(params) -} - -// QueryNeo executes a query that returns data. Implements a Neo-friendly alternative to sql/driver. 
-func (s *boltStmt) QueryNeo(params map[string]interface{}) (Rows, error) { - return s.queryNeo(params) -} - -func (s *boltStmt) queryNeo(params map[string]interface{}) (*boltRows, error) { - if s.closed { - return nil, errors.New("Neo4j Bolt statement already closed") - } - if s.rows != nil { - return nil, errors.New("Another query is already open") - } - - respInt, err := s.conn.sendRunConsume(s.query, params) - if err != nil { - return nil, err - } - - resp, ok := respInt.(messages.SuccessMessage) - if !ok { - return nil, errors.New("Unrecognized response type running query: %#v", resp) - } - - log.Infof("Got success message on run query: %#v", resp) - s.rows = newRows(s, resp.Metadata) - return s.rows, nil -} - -func (s *boltStmt) QueryPipeline(params ...map[string]interface{}) (PipelineRows, error) { - if s.closed { - return nil, errors.New("Neo4j Bolt statement already closed") - } - if s.rows != nil { - return nil, errors.New("Another query is already open") - } - - if len(params) != len(s.queries) { - return nil, errors.New("Must pass same number of params as there are queries") - } - - for i, query := range s.queries { - err := s.conn.sendRunPullAll(query, params[i]) - if err != nil { - return nil, errors.Wrap(err, "Error running query:\n\n%s\n\nWith Params:\n%#v", query, params[i]) - } - } - - log.Info("Successfully ran all pipeline queries") - - resp, err := s.conn.consume() - if err != nil { - return nil, errors.Wrap(err, "An error occurred consuming initial pipeline command") - } - - success, ok := resp.(messages.SuccessMessage) - if !ok { - return nil, errors.New("Got unexpected return message when consuming initial pipeline command: %#v", resp) - } - - s.rows = newPipelineRows(s, success.Metadata, 0) - return s.rows, nil -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/doc.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/doc.go deleted file mode 100644 index 
60ddc41d..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -/*Package structures contains various structures which are used by the Bolt protocol*/ -package structures diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/doc.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/doc.go deleted file mode 100644 index f7835262..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -/*Package graph contains structs that can be returned from the Neo4j Graph*/ -package graph diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/node.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/node.go deleted file mode 100644 index 75af9846..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/node.go +++ /dev/null @@ -1,27 +0,0 @@ -package graph - -const ( - // NodeSignature is the signature byte for a Node object - NodeSignature = 0x4E -) - -// Node Represents a Node structure -type Node struct { - NodeIdentity int64 - Labels []string - Properties map[string]interface{} -} - -// Signature gets the signature byte for the struct -func (n Node) Signature() int { - return NodeSignature -} - -// AllFields gets the fields to encode for the struct -func (n Node) AllFields() []interface{} { - labels := make([]interface{}, len(n.Labels)) - for i, label := range n.Labels { - labels[i] = label - } - return []interface{}{n.NodeIdentity, labels, n.Properties} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/path.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/path.go deleted file mode 100644 index 6d6f8c53..00000000 --- 
a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/path.go +++ /dev/null @@ -1,35 +0,0 @@ -package graph - -const ( - // PathSignature is the signature byte for a Path object - PathSignature = 0x50 -) - -// Path Represents a Path structure -type Path struct { - Nodes []Node - Relationships []UnboundRelationship - Sequence []int -} - -// Signature gets the signature byte for the struct -func (p Path) Signature() int { - return PathSignature -} - -// AllFields gets the fields to encode for the struct -func (p Path) AllFields() []interface{} { - nodes := make([]interface{}, len(p.Nodes)) - for i, node := range p.Nodes { - nodes[i] = node - } - relationships := make([]interface{}, len(p.Relationships)) - for i, relationship := range p.Relationships { - relationships[i] = relationship - } - sequences := make([]interface{}, len(p.Sequence)) - for i, sequence := range p.Sequence { - sequences[i] = sequence - } - return []interface{}{nodes, relationships, sequences} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/relationship.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/relationship.go deleted file mode 100644 index ebd42da2..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/relationship.go +++ /dev/null @@ -1,25 +0,0 @@ -package graph - -const ( - // RelationshipSignature is the signature byte for a Relationship object - RelationshipSignature = 0x52 -) - -// Relationship Represents a Relationship structure -type Relationship struct { - RelIdentity int64 - StartNodeIdentity int64 - EndNodeIdentity int64 - Type string - Properties map[string]interface{} -} - -// Signature gets the signature byte for the struct -func (r Relationship) Signature() int { - return RelationshipSignature -} - -// AllFields gets the fields to encode for the struct -func (r Relationship) AllFields() []interface{} { - 
return []interface{}{r.RelIdentity, r.StartNodeIdentity, r.EndNodeIdentity, r.Type, r.Properties} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/unbound_relationship.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/unbound_relationship.go deleted file mode 100644 index 9d581535..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph/unbound_relationship.go +++ /dev/null @@ -1,23 +0,0 @@ -package graph - -const ( - // UnboundRelationshipSignature is the signature byte for a UnboundRelationship object - UnboundRelationshipSignature = 0x72 -) - -// UnboundRelationship Represents a UnboundRelationship structure -type UnboundRelationship struct { - RelIdentity int64 - Type string - Properties map[string]interface{} -} - -// Signature gets the signature byte for the struct -func (r UnboundRelationship) Signature() int { - return UnboundRelationshipSignature -} - -// AllFields gets the fields to encode for the struct -func (r UnboundRelationship) AllFields() []interface{} { - return []interface{}{r.RelIdentity, r.Type, r.Properties} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/ack_failure.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/ack_failure.go deleted file mode 100644 index a6c73511..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/ack_failure.go +++ /dev/null @@ -1,24 +0,0 @@ -package messages - -const ( - // AckFailureMessageSignature is the signature byte for the ACK_FAILURE message - AckFailureMessageSignature = 0x0E -) - -// AckFailureMessage Represents an ACK_FAILURE message -type AckFailureMessage struct{} - -// NewAckFailureMessage Gets a new AckFailureMessage struct -func NewAckFailureMessage() AckFailureMessage { - return AckFailureMessage{} -} - -// Signature gets the 
signature byte for the struct -func (i AckFailureMessage) Signature() int { - return AckFailureMessageSignature -} - -// AllFields gets the fields to encode for the struct -func (i AckFailureMessage) AllFields() []interface{} { - return []interface{}{} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/discard_all.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/discard_all.go deleted file mode 100644 index e7a9b789..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/discard_all.go +++ /dev/null @@ -1,24 +0,0 @@ -package messages - -const ( - // DiscardAllMessageSignature is the signature byte for the DISCARD_ALL message - DiscardAllMessageSignature = 0x2F -) - -// DiscardAllMessage Represents an DISCARD_ALL message -type DiscardAllMessage struct{} - -// NewDiscardAllMessage Gets a new DiscardAllMessage struct -func NewDiscardAllMessage() DiscardAllMessage { - return DiscardAllMessage{} -} - -// Signature gets the signature byte for the struct -func (i DiscardAllMessage) Signature() int { - return DiscardAllMessageSignature -} - -// AllFields gets the fields to encode for the struct -func (i DiscardAllMessage) AllFields() []interface{} { - return []interface{}{} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/doc.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/doc.go deleted file mode 100644 index 225bc9ba..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -/*Package messages contains structs that represent the messages that get sent using the Bolt protocol*/ -package messages diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/failure.go 
b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/failure.go deleted file mode 100644 index 434ecf7e..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/failure.go +++ /dev/null @@ -1,36 +0,0 @@ -package messages - -import "fmt" - -const ( - // FailureMessageSignature is the signature byte for the FAILURE message - FailureMessageSignature = 0x7F -) - -// FailureMessage Represents an FAILURE message -type FailureMessage struct { - Metadata map[string]interface{} -} - -// NewFailureMessage Gets a new FailureMessage struct -func NewFailureMessage(metadata map[string]interface{}) FailureMessage { - return FailureMessage{ - Metadata: metadata, - } -} - -// Signature gets the signature byte for the struct -func (i FailureMessage) Signature() int { - return FailureMessageSignature -} - -// AllFields gets the fields to encode for the struct -func (i FailureMessage) AllFields() []interface{} { - return []interface{}{i.Metadata} -} - -// Error is the implementation of the Golang error interface so a failure message -// can be treated like a normal error -func (i FailureMessage) Error() string { - return fmt.Sprintf("%#v", i) -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/ignored.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/ignored.go deleted file mode 100644 index 1fc7e2f8..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/ignored.go +++ /dev/null @@ -1,24 +0,0 @@ -package messages - -const ( - // IgnoredMessageSignature is the signature byte for the IGNORED message - IgnoredMessageSignature = 0x7E -) - -// IgnoredMessage Represents an IGNORED message -type IgnoredMessage struct{} - -// NewIgnoredMessage Gets a new IgnoredMessage struct -func NewIgnoredMessage() IgnoredMessage { - return IgnoredMessage{} -} - -// Signature gets the 
signature byte for the struct -func (i IgnoredMessage) Signature() int { - return IgnoredMessageSignature -} - -// AllFields gets the fields to encode for the struct -func (i IgnoredMessage) AllFields() []interface{} { - return []interface{}{} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/init.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/init.go deleted file mode 100644 index cb8aff60..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/init.go +++ /dev/null @@ -1,43 +0,0 @@ -package messages - -const ( - // InitMessageSignature is the signature byte for the INIT message - InitMessageSignature = 0x01 -) - -// InitMessage Represents an INIT message -type InitMessage struct { - clientName string - authToken map[string]interface{} -} - -// NewInitMessage Gets a new InitMessage struct -func NewInitMessage(clientName string, user string, password string) InitMessage { - var authToken map[string]interface{} - if user == "" { - authToken = map[string]interface{}{ - "scheme": "none", - } - } else { - authToken = map[string]interface{}{ - "scheme": "basic", - "principal": user, - "credentials": password, - } - } - - return InitMessage{ - clientName: clientName, - authToken: authToken, - } -} - -// Signature gets the signature byte for the struct -func (i InitMessage) Signature() int { - return InitMessageSignature -} - -// AllFields gets the fields to encode for the struct -func (i InitMessage) AllFields() []interface{} { - return []interface{}{i.clientName, i.authToken} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/pull_all.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/pull_all.go deleted file mode 100644 index c0daa3ed..00000000 --- 
a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/pull_all.go +++ /dev/null @@ -1,24 +0,0 @@ -package messages - -const ( - // PullAllMessageSignature is the signature byte for the PULL_ALL message - PullAllMessageSignature = 0x3F -) - -// PullAllMessage Represents an PULL_ALL message -type PullAllMessage struct{} - -// NewPullAllMessage Gets a new PullAllMessage struct -func NewPullAllMessage() PullAllMessage { - return PullAllMessage{} -} - -// Signature gets the signature byte for the struct -func (i PullAllMessage) Signature() int { - return PullAllMessageSignature -} - -// AllFields gets the fields to encode for the struct -func (i PullAllMessage) AllFields() []interface{} { - return []interface{}{} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/record.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/record.go deleted file mode 100644 index be17d1dd..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/record.go +++ /dev/null @@ -1,28 +0,0 @@ -package messages - -const ( - // RecordMessageSignature is the signature byte for the RECORD message - RecordMessageSignature = 0x71 -) - -// RecordMessage Represents an RECORD message -type RecordMessage struct { - Fields []interface{} -} - -// NewRecordMessage Gets a new RecordMessage struct -func NewRecordMessage(fields []interface{}) RecordMessage { - return RecordMessage{ - Fields: fields, - } -} - -// Signature gets the signature byte for the struct -func (i RecordMessage) Signature() int { - return RecordMessageSignature -} - -// AllFields gets the fields to encode for the struct -func (i RecordMessage) AllFields() []interface{} { - return []interface{}{i.Fields} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/reset.go 
b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/reset.go deleted file mode 100644 index 3233b273..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/reset.go +++ /dev/null @@ -1,24 +0,0 @@ -package messages - -const ( - // ResetMessageSignature is the signature byte for the RESET message - ResetMessageSignature = 0x0F -) - -// ResetMessage Represents an RESET message -type ResetMessage struct{} - -// NewResetMessage Gets a new ResetMessage struct -func NewResetMessage() ResetMessage { - return ResetMessage{} -} - -// Signature gets the signature byte for the struct -func (i ResetMessage) Signature() int { - return ResetMessageSignature -} - -// AllFields gets the fields to encode for the struct -func (i ResetMessage) AllFields() []interface{} { - return []interface{}{} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/run.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/run.go deleted file mode 100644 index 65245567..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/run.go +++ /dev/null @@ -1,30 +0,0 @@ -package messages - -const ( - // RunMessageSignature is the signature byte for the RUN message - RunMessageSignature = 0x10 -) - -// RunMessage Represents an RUN message -type RunMessage struct { - statement string - parameters map[string]interface{} -} - -// NewRunMessage Gets a new RunMessage struct -func NewRunMessage(statement string, parameters map[string]interface{}) RunMessage { - return RunMessage{ - statement: statement, - parameters: parameters, - } -} - -// Signature gets the signature byte for the struct -func (i RunMessage) Signature() int { - return RunMessageSignature -} - -// AllFields gets the fields to encode for the struct -func (i RunMessage) AllFields() []interface{} { - return []interface{}{i.statement, 
i.parameters} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/success.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/success.go deleted file mode 100644 index 9a0337f4..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages/success.go +++ /dev/null @@ -1,28 +0,0 @@ -package messages - -const ( - // SuccessMessageSignature is the signature byte for the SUCCESS message - SuccessMessageSignature = 0x70 -) - -// SuccessMessage Represents an SUCCESS message -type SuccessMessage struct { - Metadata map[string]interface{} -} - -// NewSuccessMessage Gets a new SuccessMessage struct -func NewSuccessMessage(metadata map[string]interface{}) SuccessMessage { - return SuccessMessage{ - Metadata: metadata, - } -} - -// Signature gets the signature byte for the struct -func (i SuccessMessage) Signature() int { - return SuccessMessageSignature -} - -// AllFields gets the fields to encode for the struct -func (i SuccessMessage) AllFields() []interface{} { - return []interface{}{i.Metadata} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/structures.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/structures.go deleted file mode 100644 index 7139c948..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/structures.go +++ /dev/null @@ -1,7 +0,0 @@ -package structures - -// Structure represents a Neo4J structure -type Structure interface { - Signature() int - AllFields() []interface{} -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/tx.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/tx.go deleted file mode 100644 index e70bbefb..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/tx.go +++ /dev/null @@ -1,96 +0,0 @@ -package 
golangNeo4jBoltDriver - -import ( - "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/log" - "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages" -) - -// Tx represents a transaction -type Tx interface { - // Commit commits the transaction - Commit() error - // Rollback rolls back the transaction - Rollback() error -} - -type boltTx struct { - conn *boltConn - closed bool -} - -func newTx(conn *boltConn) *boltTx { - return &boltTx{ - conn: conn, - } -} - -// Commit commits and closes the transaction -func (t *boltTx) Commit() error { - if t.closed { - return errors.New("Transaction already closed") - } - if t.conn.statement != nil { - if err := t.conn.statement.Close(); err != nil { - return errors.Wrap(err, "An error occurred closing open rows in transaction Commit") - } - } - - successInt, pullInt, err := t.conn.sendRunPullAllConsumeSingle("COMMIT", nil) - if err != nil { - return errors.Wrap(err, "An error occurred committing transaction") - } - - success, ok := successInt.(messages.SuccessMessage) - if !ok { - return errors.New("Unrecognized response type committing transaction: %#v", success) - } - - log.Infof("Got success message committing transaction: %#v", success) - - pull, ok := pullInt.(messages.SuccessMessage) - if !ok { - return errors.New("Unrecognized response type pulling transaction: %#v", pull) - } - - log.Infof("Got success message pulling transaction: %#v", pull) - - t.conn.transaction = nil - t.closed = true - return err -} - -// Rollback rolls back and closes the transaction -func (t *boltTx) Rollback() error { - if t.closed { - return errors.New("Transaction already closed") - } - if t.conn.statement != nil { - if err := t.conn.statement.Close(); err != nil { - return errors.Wrap(err, "An error occurred closing open rows in transaction Rollback") - } - } - - successInt, pullInt, err := t.conn.sendRunPullAllConsumeSingle("ROLLBACK", 
nil) - if err != nil { - return errors.Wrap(err, "An error occurred rolling back transaction") - } - - success, ok := successInt.(messages.SuccessMessage) - if !ok { - return errors.New("Unrecognized response type rolling back transaction: %#v", success) - } - - log.Infof("Got success message rolling back transaction: %#v", success) - - pull, ok := pullInt.(messages.SuccessMessage) - if !ok { - return errors.New("Unrecognized response type pulling transaction: %#v", pull) - } - - log.Infof("Got success message pulling transaction: %#v", pull) - - t.conn.transaction = nil - t.closed = true - return err -} diff --git a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/util.go b/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/util.go deleted file mode 100644 index 4e16ab73..00000000 --- a/vendor/github.com/johnnadratowski/golang-neo4j-bolt-driver/util.go +++ /dev/null @@ -1,52 +0,0 @@ -package golangNeo4jBoltDriver - -import ( - "database/sql/driver" - "errors" - "fmt" - - "github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding" -) - -// sprintByteHex returns a formatted string of the byte array in hexadecimal -// with a nicely formatted human-readable output -func sprintByteHex(b []byte) string { - output := "\t" - for i, b := range b { - output += fmt.Sprintf("%x", b) - if (i+1)%16 == 0 { - output += "\n\n\t" - } else if (i+1)%4 == 0 { - output += " " - } else { - output += " " - } - } - output += "\n" - - return output -} - -// driverArgsToMap turns driver.Value list into a parameter map -// for neo4j parameters -func driverArgsToMap(args []driver.Value) (map[string]interface{}, error) { - output := map[string]interface{}{} - for _, arg := range args { - argBytes, ok := arg.([]byte) - if !ok { - return nil, errors.New("You must pass only a gob encoded map to the Exec/Query args") - } - - m, err := encoding.Unmarshal(argBytes) - if err != nil { - return nil, err - } - - for k, v := range 
m.(map[string]interface{}) { - output[k] = v - } - - } - - return output, nil -} diff --git a/vendor/vendor.json b/vendor/vendor.json index dcd62d84..9f0e2634 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -14,95 +14,89 @@ "revision": "221f2ea9e3719581eb8f75f9e936f31c0de53bb7", "revisionTime": "2019-03-22T12:13:12Z" }, - { - "checksumSHA1": "IaAUZHp8VHqYnRTXpmyhvLgDZfA=", - "path": "github.com/ONSdigital/dp-filter/observation", - "revision": "df1112706241a452aa44f4778b303239984d37cf", - "revisionTime": "2019-01-29T13:58:41Z" - }, { "checksumSHA1": "4hOuaa2bRo+okR3xsWFCTeo1NNc=", "path": "github.com/ONSdigital/dp-graph/config", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "Qg+JFeVzRnx1l8wM3ZE04gLExcs=", "path": "github.com/ONSdigital/dp-graph/graph", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "263RXdCaG1tXdn57RTXlzP1TYS8=", "path": "github.com/ONSdigital/dp-graph/graph/driver", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "PyMyKOBc90I+59WEPLD6Aovpv/4=", "path": "github.com/ONSdigital/dp-graph/mock", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "rlvdEFCCXvGZW0R5HBM0FEEhN38=", "path": "github.com/ONSdigital/dp-graph/neo4j", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + 
"revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "u0TDiIBM7IO/NZDXOvEwm3xbqvw=", "path": "github.com/ONSdigital/dp-graph/neo4j/mapper", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "fWNLT90j5B/ygFLfYpUB+TMoMaA=", "path": "github.com/ONSdigital/dp-graph/neo4j/neo4jdriver", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "QV5LGj4rOKWT5tvkZdzixj4D2OM=", "path": "github.com/ONSdigital/dp-graph/neo4j/query", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { - "checksumSHA1": "Ww/x2y8TYmMSyoXW+YQ8OpXw0DU=", + "checksumSHA1": "2shFkZlSu0cDXE92opFBihb8xjk=", "path": "github.com/ONSdigital/dp-graph/neptune", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "c8Q5ZPmuDWCGuJWUYMEj3gWNx2c=", "path": "github.com/ONSdigital/dp-graph/neptune/driver", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { - "checksumSHA1": "hDxHE2IdkTwq79uP85i5nnO/Exc=", + "checksumSHA1": "CmMvyIaWLrrLiiWlWhzfRJBHYeQ=", "path": "github.com/ONSdigital/dp-graph/neptune/internal", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + 
"revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { - "checksumSHA1": "3xojQzvkGmJaZenf+/irFriKoNk=", + "checksumSHA1": "Um7W2WQvdmZ4AhAJP2gDvm597hs=", "path": "github.com/ONSdigital/dp-graph/neptune/query", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "DRohWZAkyrLLQ5B5yq/bngMZbgM=", "path": "github.com/ONSdigital/dp-graph/observation", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "ZVHJ3XHyBXaEFq1DN04u4+z6nP4=", "path": "github.com/ONSdigital/dp-graph/observation/observationtest", - "revision": "573b5a7a0f4e52bc3aa7a7a0d283111cb139d06e", - "revisionTime": "2019-08-02T16:47:15Z" + "revision": "8ccc8f292498d740cb311dbca0883b789118e616", + "revisionTime": "2019-08-06T12:38:18Z" }, { "checksumSHA1": "sX7Krcb3Xi+QkzybgFfBj6hJ81A=", @@ -255,10 +249,10 @@ "revisionTime": "2019-07-18T13:40:34Z" }, { - "checksumSHA1": "gssBNtoM4Aw4vs4psO1vYu3HEhw=", + "checksumSHA1": "gv/+4ca/1dLl91OvlCxf6T9/040=", "path": "github.com/ONSdigital/gremgo-neptune", - "revision": "d65314667aa09cfea5be4477a2b68f29d14a3ddb", - "revisionTime": "2019-08-01T15:10:53Z" + "revision": "c3c614e5b650ce75db31c0b9058291a164fb5aa2", + "revisionTime": "2019-08-06T13:50:47Z" }, { "checksumSHA1": "+Jp0tVXfQ1TM8T+oun82oJtME5U=", @@ -302,12 +296,6 @@ "revision": "6920413b753350672215a083e0f9d5c270a21075", "revisionTime": "2017-11-28T09:28:02Z" }, - { - "checksumSHA1": "uE/knpBWtpFt1k/xZmpwSs+BhIs=", - "path": "github.com/gedge/graphson", - "revision": "d39cb8fe4384259290719c4b5693f39f0d8b85ca", - "revisionTime": "2019-05-31T09:24:26Z" - }, { "checksumSHA1": 
"C0PrqJwZS9A+Izfrk+YlZQJFsvg=", "path": "github.com/globalsign/mgo", @@ -382,48 +370,6 @@ "revision": "80c2d40e9b91f2ef7a9c1a403aeec64d1b89a9a6", "revisionTime": "2019-04-27T04:03:06Z" }, - { - "checksumSHA1": "TsX+LuxHhV9GFmua8C1nxflFcTA=", - "path": "github.com/johnnadratowski/golang-neo4j-bolt-driver", - "revision": "1108d6e66ccf2c8e68ab26b5f64e6c0a2ad00899", - "revisionTime": "2017-12-18T14:36:11Z" - }, - { - "checksumSHA1": "GYtNDxyckMgJew8cMZggWD9xfhg=", - "path": "github.com/johnnadratowski/golang-neo4j-bolt-driver/encoding", - "revision": "2387cc1f01254d3a0055e034f5716278a1f420c7", - "revisionTime": "2016-12-20T21:52:15Z" - }, - { - "checksumSHA1": "9lgMFoaIFZe75vv7ln+IbPKHasE=", - "path": "github.com/johnnadratowski/golang-neo4j-bolt-driver/errors", - "revision": "2387cc1f01254d3a0055e034f5716278a1f420c7", - "revisionTime": "2016-12-20T21:52:15Z" - }, - { - "checksumSHA1": "MeB74aEJl/Vif3wfI/yyskLXgQ8=", - "path": "github.com/johnnadratowski/golang-neo4j-bolt-driver/log", - "revision": "2387cc1f01254d3a0055e034f5716278a1f420c7", - "revisionTime": "2016-12-20T21:52:15Z" - }, - { - "checksumSHA1": "QBWn/ajykCeEt1W21Ufl9RxLeX4=", - "path": "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures", - "revision": "2387cc1f01254d3a0055e034f5716278a1f420c7", - "revisionTime": "2016-12-20T21:52:15Z" - }, - { - "checksumSHA1": "vLBWZ/5wRQ4PIyIOW3dbXlKbd3s=", - "path": "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/graph", - "revision": "2387cc1f01254d3a0055e034f5716278a1f420c7", - "revisionTime": "2016-12-20T21:52:15Z" - }, - { - "checksumSHA1": "WSdNJOxRdPV0gCzq7mX6cqIf3/c=", - "path": "github.com/johnnadratowski/golang-neo4j-bolt-driver/structures/messages", - "revision": "2387cc1f01254d3a0055e034f5716278a1f420c7", - "revisionTime": "2016-12-20T21:52:15Z" - }, { "checksumSHA1": "cIiyvAduLLFvu+tg1Qr5Jw3jeWo=", "path": "github.com/jtolds/gls", From 
1616475742af6fc5341265d05df5633d452460e4 Mon Sep 17 00:00:00 2001 From: Eleanor Deal Date: Thu, 8 Aug 2019 09:59:36 +0100 Subject: [PATCH 05/15] Fix quotes in makefile for graph vars --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index f13961bd..9e8d16dc 100644 --- a/Makefile +++ b/Makefile @@ -7,8 +7,8 @@ BIN_DIR?=. export GOOS?=$(shell go env GOOS) export GOARCH?=$(shell go env GOARCH) -export GRAPH_DRIVER_TYPE?="neptune" -export GRAPH_ADDR?="ws://localhost:8182/gremlin" +export GRAPH_DRIVER_TYPE?=neptune +export GRAPH_ADDR?=ws://localhost:8182/gremlin export ENABLE_PRIVATE_ENDPOINTS?=true From 48d22024e781aaed6136977d29e2d3d99c279605 Mon Sep 17 00:00:00 2001 From: CarlHembrough Date: Mon, 11 May 2020 10:02:26 +0100 Subject: [PATCH 06/15] Upgrade dp-graph to version 2.0.5 --- go.mod | 2 +- go.sum | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index df974b13..ca61d771 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.13 require ( github.com/ONSdigital/dp-api-clients-go v1.9.0 github.com/ONSdigital/dp-authorisation v0.0.0-20191018110224-d4543527d6cb - github.com/ONSdigital/dp-graph/v2 v2.0.4 + github.com/ONSdigital/dp-graph/v2 v2.0.5 github.com/ONSdigital/dp-healthcheck v1.0.2 github.com/ONSdigital/dp-kafka v1.1.5 github.com/ONSdigital/dp-mongodb v1.2.0 diff --git a/go.sum b/go.sum index c95b8ecf..7607e68d 100644 --- a/go.sum +++ b/go.sum @@ -7,6 +7,10 @@ github.com/ONSdigital/dp-authorisation v0.0.0-20191018110224-d4543527d6cb/go.mod github.com/ONSdigital/dp-frontend-models v1.1.0/go.mod h1:TT96P7Mi69N3Tc/jFNdbjiwG4GAaMjP26HLotFQ6BPw= github.com/ONSdigital/dp-graph/v2 v2.0.4 h1:KwACJNL5r3P41yc3gZPF1fpSfWVSzCfd3DpL2qLgUZI= github.com/ONSdigital/dp-graph/v2 v2.0.4/go.mod h1:jh7BsDGMG0VgNtSvc3hlA2wXs3H1cwdWCp84VGzMVB8= +github.com/ONSdigital/dp-graph/v2 
v2.0.5-0.20200506094018-cf6a6309416b h1:wIByzvaqR/eS9DKh46w0kqPAE/KyALwoqqiE7+rs9Os= +github.com/ONSdigital/dp-graph/v2 v2.0.5-0.20200506094018-cf6a6309416b/go.mod h1:jh7BsDGMG0VgNtSvc3hlA2wXs3H1cwdWCp84VGzMVB8= +github.com/ONSdigital/dp-graph/v2 v2.0.5 h1:i5+uPKgYTI2Sdyc6ecAYkFaDFMuo7jx88cZEpmAoAYA= +github.com/ONSdigital/dp-graph/v2 v2.0.5/go.mod h1:jh7BsDGMG0VgNtSvc3hlA2wXs3H1cwdWCp84VGzMVB8= github.com/ONSdigital/dp-healthcheck v0.0.0-20200131122546-9db6d3f0494e/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= github.com/ONSdigital/dp-healthcheck v1.0.0/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= github.com/ONSdigital/dp-healthcheck v1.0.2 h1:N8SzpYzdixVgJS9NMzTBA2RZ2bi3Am1wE5F8ROEpTYw= From 1ca369933775f2b22da283744f9fb309f38eb3d0 Mon Sep 17 00:00:00 2001 From: CarlHembrough Date: Tue, 12 May 2020 09:00:41 +0100 Subject: [PATCH 07/15] Remove redundant vendor directory --- .../ONSdigital/dp-graph/neptune/codelist.go | 229 ---- .../dp-graph/neptune/codelistsdataset.go | 165 --- .../ONSdigital/dp-graph/neptune/dimension.go | 35 - .../dp-graph/neptune/driver/driver.go | 23 - .../dp-graph/neptune/driver/healthcheck.go | 14 - .../dp-graph/neptune/driver/neptunepool.go | 24 - .../ONSdigital/dp-graph/neptune/hierarchy.go | 296 ----- .../ONSdigital/dp-graph/neptune/instance.go | 162 --- .../neptune/internal/mockpoolutils.go | 184 --- .../dp-graph/neptune/internal/pool.go | 409 ------ .../ONSdigital/dp-graph/neptune/mapper.go | 132 -- .../dp-graph/neptune/mockedneptune.go | 24 - .../ONSdigital/dp-graph/neptune/neptune.go | 258 ---- .../dp-graph/neptune/observation.go | 111 -- .../dp-graph/neptune/query/query.go | 109 -- .../ONSdigital/graphson/deserialize.go | 249 ---- .../github.com/ONSdigital/graphson/types.go | 153 --- .../github.com/ONSdigital/graphson/utils.go | 246 ---- .../ONSdigital/graphson/validation_utils.go | 94 -- .../gremgo-neptune/Dockerfile.gremlin | 1 - 
.../ONSdigital/gremgo-neptune/Makefile | 21 - .../ONSdigital/gremgo-neptune/README.md | 41 - .../ONSdigital/gremgo-neptune/TODO.md | 8 - .../ONSdigital/gremgo-neptune/client.go | 514 -------- .../gremgo-neptune/configuration.go | 42 - .../ONSdigital/gremgo-neptune/connection.go | 270 ---- .../ONSdigital/gremgo-neptune/cursor.go | 86 -- .../ONSdigital/gremgo-neptune/go.mod | 9 - .../ONSdigital/gremgo-neptune/go.sum | 25 - .../ONSdigital/gremgo-neptune/pool.go | 535 -------- .../ONSdigital/gremgo-neptune/request.go | 96 -- .../ONSdigital/gremgo-neptune/response.go | 231 ---- .../ONSdigital/gremgo-neptune/tags.go | 42 - vendor/github.com/gofrs/uuid/LICENSE | 20 - vendor/github.com/gofrs/uuid/README.md | 109 -- vendor/github.com/gofrs/uuid/codec.go | 212 --- vendor/github.com/gofrs/uuid/fuzz.go | 47 - vendor/github.com/gofrs/uuid/generator.go | 299 ----- vendor/github.com/gofrs/uuid/sql.go | 109 -- vendor/github.com/gofrs/uuid/uuid.go | 250 ---- vendor/github.com/gorilla/websocket/AUTHORS | 9 - vendor/github.com/gorilla/websocket/LICENSE | 22 - vendor/github.com/gorilla/websocket/README.md | 64 - vendor/github.com/gorilla/websocket/client.go | 395 ------ .../gorilla/websocket/client_clone.go | 16 - .../gorilla/websocket/client_clone_legacy.go | 38 - .../gorilla/websocket/compression.go | 148 --- vendor/github.com/gorilla/websocket/conn.go | 1163 ----------------- .../gorilla/websocket/conn_write.go | 15 - .../gorilla/websocket/conn_write_legacy.go | 18 - vendor/github.com/gorilla/websocket/doc.go | 227 ---- vendor/github.com/gorilla/websocket/go.mod | 1 - vendor/github.com/gorilla/websocket/go.sum | 2 - vendor/github.com/gorilla/websocket/join.go | 42 - vendor/github.com/gorilla/websocket/json.go | 60 - vendor/github.com/gorilla/websocket/mask.go | 54 - .../github.com/gorilla/websocket/mask_safe.go | 15 - 
.../github.com/gorilla/websocket/prepared.go | 102 -- vendor/github.com/gorilla/websocket/proxy.go | 77 -- vendor/github.com/gorilla/websocket/server.go | 363 ----- vendor/github.com/gorilla/websocket/trace.go | 19 - .../github.com/gorilla/websocket/trace_17.go | 12 - vendor/github.com/gorilla/websocket/util.go | 283 ---- .../gorilla/websocket/x_net_proxy.go | 473 ------- 64 files changed, 9502 deletions(-) delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/codelist.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/codelistsdataset.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/driver/driver.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/driver/healthcheck.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/instance.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/mockedneptune.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/observation.go delete mode 100644 vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go delete mode 100644 vendor/github.com/ONSdigital/graphson/deserialize.go delete mode 100644 vendor/github.com/ONSdigital/graphson/types.go delete mode 100644 
vendor/github.com/ONSdigital/graphson/utils.go delete mode 100644 vendor/github.com/ONSdigital/graphson/validation_utils.go delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/Dockerfile.gremlin delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/Makefile delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/README.md delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/TODO.md delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/client.go delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/configuration.go delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/connection.go delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/cursor.go delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/go.mod delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/go.sum delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/pool.go delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/request.go delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/response.go delete mode 100644 vendor/github.com/ONSdigital/gremgo-neptune/tags.go delete mode 100644 vendor/github.com/gofrs/uuid/LICENSE delete mode 100644 vendor/github.com/gofrs/uuid/README.md delete mode 100644 vendor/github.com/gofrs/uuid/codec.go delete mode 100644 vendor/github.com/gofrs/uuid/fuzz.go delete mode 100644 vendor/github.com/gofrs/uuid/generator.go delete mode 100644 vendor/github.com/gofrs/uuid/sql.go delete mode 100644 vendor/github.com/gofrs/uuid/uuid.go delete mode 100644 vendor/github.com/gorilla/websocket/AUTHORS delete mode 100644 vendor/github.com/gorilla/websocket/LICENSE delete mode 100644 vendor/github.com/gorilla/websocket/README.md delete mode 100644 
vendor/github.com/gorilla/websocket/client.go delete mode 100644 vendor/github.com/gorilla/websocket/client_clone.go delete mode 100644 vendor/github.com/gorilla/websocket/client_clone_legacy.go delete mode 100644 vendor/github.com/gorilla/websocket/compression.go delete mode 100644 vendor/github.com/gorilla/websocket/conn.go delete mode 100644 vendor/github.com/gorilla/websocket/conn_write.go delete mode 100644 vendor/github.com/gorilla/websocket/conn_write_legacy.go delete mode 100644 vendor/github.com/gorilla/websocket/doc.go delete mode 100644 vendor/github.com/gorilla/websocket/go.mod delete mode 100644 vendor/github.com/gorilla/websocket/go.sum delete mode 100644 vendor/github.com/gorilla/websocket/join.go delete mode 100644 vendor/github.com/gorilla/websocket/json.go delete mode 100644 vendor/github.com/gorilla/websocket/mask.go delete mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go delete mode 100644 vendor/github.com/gorilla/websocket/prepared.go delete mode 100644 vendor/github.com/gorilla/websocket/proxy.go delete mode 100644 vendor/github.com/gorilla/websocket/server.go delete mode 100644 vendor/github.com/gorilla/websocket/trace.go delete mode 100644 vendor/github.com/gorilla/websocket/trace_17.go delete mode 100644 vendor/github.com/gorilla/websocket/util.go delete mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/codelist.go b/vendor/github.com/ONSdigital/dp-graph/neptune/codelist.go deleted file mode 100644 index 6804a014..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/codelist.go +++ /dev/null @@ -1,229 +0,0 @@ -/* -This module, when combined with codelistdataset.go, provides code that -satisfies the graph.driver.CodeList interface using Gremlin queries into -a 
Neptune database. -*/ - -package neptune - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - - "github.com/ONSdigital/dp-code-list-api/models" - "github.com/ONSdigital/dp-graph/graph/driver" - "github.com/ONSdigital/dp-graph/neptune/query" -) - -/* -GetCodeLists provides a list of either all Code Lists, or a list of only those -having a boolean property with the name which is set to true. E.g. -"geography": true. The caller is expected to -fully qualify the embedded Links field afterwards. It returns an error if: -- The Gremlin query failed to execute. -- A CodeList is encountered that does not have *listID* property. -*/ -func (n *NeptuneDB) GetCodeLists(ctx context.Context, filterBy string) (*models.CodeListResults, error) { - // Use differing Gremlin queries - depending on if a filterBy string is specified. - var qry string - if filterBy == "" { - qry = fmt.Sprintf(query.GetCodeLists) - } else { - qry = fmt.Sprintf(query.GetCodeListsFiltered, filterBy) - } - codeListVertices, err := n.getVertices(qry) - if err != nil { - return nil, errors.Wrapf(err, "Gremlin query failed: %q", qry) - } - results := &models.CodeListResults{ - Count: len(codeListVertices), - Limit: len(codeListVertices), - TotalCount: len(codeListVertices), - } - for _, codeListVertex := range codeListVertices { - codeListID, err := codeListVertex.GetProperty("listID") - if err != nil { - return nil, errors.Wrapf(err, `Error reading "listID" property on Code List vertex`) - } - link := &models.CodeListLink{Self: &models.Link{ID: codeListID}} - codeListMdl := models.CodeList{codeListID, link} - results.Items = append(results.Items, codeListMdl) - } - return results, nil -} - -// GetCodeList provides a CodeList for a given ID (e.g. "ashe-earnings"), -// having checked it exists -// in the database. Nb. The caller is expected to fully qualify the embedded -// Links field afterwards. It returns an error if: -// - The Gremlin query failed to execute. 
-// - The requested CodeList does not exist. (error is `ErrNotFound`) -// - Duplicate CodeLists exist with the given ID (error is `ErrMultipleFound`) -func (n *NeptuneDB) GetCodeList(ctx context.Context, codeListID string) ( - *models.CodeList, error) { - existsQry := fmt.Sprintf(query.CodeListExists, codeListID) - count, err := n.getNumber(existsQry) - if err != nil { - return nil, errors.Wrapf(err, "Gremlin query failed: %q", existsQry) - } - if count == 0 { - return nil, driver.ErrNotFound - } - if count > 1 { - return nil, driver.ErrMultipleFound - } - - return &models.CodeList{ - Links: &models.CodeListLink{ - Self: &models.Link{ - ID: codeListID, - }, - }, - }, nil -} - -/* -GetEditions provides a models.Editions structure populated based on the -the values in the Code List vertices in the database, that have the provided -codeListId. -It returns an error if: -- The Gremlin query failed to execute. (wrapped error) -- No CodeLists are found of the requested codeListID (error is ErrNotFound') -- A CodeList is found that does not have the "edition" property (error is 'ErrNoSuchProperty') -*/ -func (n *NeptuneDB) GetEditions(ctx context.Context, codeListID string) (*models.Editions, error) { - qry := fmt.Sprintf(query.GetCodeList, codeListID) - codeLists, err := n.getVertices(qry) - if err != nil { - return nil, errors.Wrapf(err, "Gremlin query failed: %q", qry) - } - if len(codeLists) == 0 { - return nil, driver.ErrNotFound - } - editions := &models.Editions{ - Count: len(codeLists), - Offset: 0, - Limit: len(codeLists), - TotalCount: len(codeLists), - Items: []models.Edition{}, - } - for _, codeList := range codeLists { - editionString, err := codeList.GetProperty("edition") - if err != nil { - return nil, errors.Wrapf(err, `Error reading "edition" property on Code List vertex`) - } - edition := models.Edition{ - Links: &models.EditionLinks{ - Self: &models.Link{ - ID: editionString, - }, - }, - } - editions.Items = append(editions.Items, edition) - } - return 
editions, nil -} - -/* -GetEdition provides an Edition structure for the code list in the database that -has both the given codeListID (e.g. "ashed-earnings"), and the given edition string -(e.g. "one-off"). -Nb. The caller is expected to fully qualify the embedded Links field -afterwards. -It returns an error if: -- The Gremlin query failed to execute. (wrapped error) -- No CodeLists exist with the requested codeListID (error is `ErrNotFound`) -- A CodeList is found that does not have the "edition" property (error is 'ErrNoSuchProperty') -- More than one CodeList exists with the requested ID AND edition (error is `ErrMultipleFound`) -*/ -func (n *NeptuneDB) GetEdition(ctx context.Context, codeListID, edition string) (*models.Edition, error) { - qry := fmt.Sprintf(query.CodeListEditionExists, codeListID, edition) - nFound, err := n.getNumber(qry) - if err != nil { - return nil, errors.Wrapf(err, "Gremlin query failed: %q", qry) - } - if nFound == 0 { - return nil, driver.ErrNotFound - } - if nFound > 1 { - return nil, driver.ErrMultipleFound - } - // What we return (having performed the checks above), is actually hard-coded, as a function of the - // method parameters. - return &models.Edition{Links: &models.EditionLinks{Self: &models.Link{ID: edition}}}, nil -} - -/* -GetCodes provides a list of Code(s) packaged into a models.CodeResults structure that has been populated by -a database query that finds the Code List nodes of the required codeListID (e.g. "ashe-earnings"), and the -required edition (e.g. "one-off"), and then harvests the Code nodes that are known to be "usedBy" that -Code List. It raises a wrapped error if the database raises a non-transient error, (e.g. malformed -query). It raises driver.ErrNotFound if the graph traversal above produces an empty list of codes - -including the case of a short-circuit early termination of the query, because no such qualifying code -list exists. 
It returns a wrapped error if a Code is found that does not have a "value" property. -*/ -func (n *NeptuneDB) GetCodes(ctx context.Context, codeListID, edition string) (*models.CodeResults, error) { - qry := fmt.Sprintf(query.GetCodes, codeListID, edition) - codeResponses, err := n.getVertices(qry) - if err != nil { - return nil, errors.Wrapf(err, "Gremlin query failed: %q", qry) - } - if len(codeResponses) == 0 { - return nil, driver.ErrNotFound - } - codeResults := &models.CodeResults{ - Count: len(codeResponses), - Offset: 0, - Limit: len(codeResponses), - TotalCount: len(codeResponses), - Items: []models.Code{}, - } - - for _, codeResponse := range codeResponses { - codeValue, err := codeResponse.GetProperty("value") - if err != nil { - return nil, errors.Wrapf(err, `Error reading "value" property on Code vertex`) - } - codeItem := models.Code{ - Links: &models.CodeLinks{ - Self: &models.Link{ - ID: codeValue, - }, - }, - } - codeResults.Items = append(codeResults.Items, codeItem) - } - return codeResults, nil -} - -/* -GetCode provides a Code struct to represent the requested code list, edition and code string. -E.g. ashe-earnings|one-off|hourly-pay-gross. -It doesn't need to access the database to form the response, but does so to validate the -query. Specifically it can return errors as follows: -- The Gremlin query failed to execute. -- The query parameter values do not successfully navigate to a Code node. 
(error is `ErrNotFound`) -- Duplicate Code(s) exist that satisfy the search criteria (error is `ErrMultipleFound`) -*/ -func (n *NeptuneDB) GetCode(ctx context.Context, codeListID, edition string, code string) (*models.Code, error) { - qry := fmt.Sprintf(query.CodeExists, codeListID, edition, code) - nFound, err := n.getNumber(qry) - if err != nil { - return nil, errors.Wrapf(err, "Gremlin query failed: %q", qry) - } - if nFound == 0 { - return nil, driver.ErrNotFound - } - if nFound > 1 { - return nil, driver.ErrMultipleFound - } - return &models.Code{ - Links: &models.CodeLinks{ - Self: &models.Link{ - ID: code, - }, - }, - }, nil -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/codelistsdataset.go b/vendor/github.com/ONSdigital/dp-graph/neptune/codelistsdataset.go deleted file mode 100644 index e13d7d08..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/codelistsdataset.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -This module, when combined with codelist.go, provides code that -satisfies the graph.driver.CodeList interface using Gremlin queries into -a Neptune database. - -It is dedicated to code to satisfy the GetCodeDatasets() method - -which is sufficiently complex to merit a module (and tests) of its own. -*/ -package neptune - -import ( - "context" - "fmt" - "strconv" - - "github.com/pkg/errors" - - "github.com/ONSdigital/dp-code-list-api/models" - "github.com/ONSdigital/dp-graph/neptune/query" -) - -/* -GetCodeDatasets searches the database for datasets that are associated with -the given code list, code, and code list edition. Specifically those that -satisfy all of: - 1) code lists that match the requested code list ID. - 2) code lists of the requested edition. - 3) codes that match the requested code value. - 4) datasets that are related to qualifying codes by *inDataset* edges. - 5) datasets that have the *isPublished* state true. 
- -Each such result from the database (potentially) has the properties: - - dimensionName (what the dataset calls this dimension) - - datasetEdition - - version - -The results however include all permuations of dimensionName and -datasetEdition - BUT ONLY CITES the most recent dataset *version* of those -found for that permuation. - -*/ -func (n *NeptuneDB) GetCodeDatasets(ctx context.Context, codeListID, edition string, code string) (*models.Datasets, error) { - - // Emit the query and parse the responses. - qry := fmt.Sprintf(query.GetCodeDatasets, codeListID, edition, code) - responses, err := n.getStringList(qry) - if err != nil { - return nil, errors.Wrapf(err, "Gremlin GetCodeDatasets failed: %q", qry) - } - - // Isolate the individual records from the flattened response. - // [['dim', 'edition', 'version', 'datasetID'], ['dim', 'edition', ...]] - responseRecords, err := createRecords(responses) - if err != nil { - return nil, errors.Wrap(err, "Cannot create records.") - } - - // Build datastructure to capture only latest dataset versions. - latestVersionMaps, err := buildLatestVersionMaps(responseRecords) - if err != nil { - return nil, errors.Wrap(err, "Cannot isolate latest versions.") - } - - // Package up the model-ised response. - response := buildResponse(latestVersionMaps, code, codeListID) - return response, nil -} - -/* -createRecords splits a list of strings into clumps of 4 -*/ -func createRecords(responses []string) ([][]string, error) { - var responseRecords = [][]string{} - const stride = 4 // I.e. 
dimesionName, edition, version, datasetID - if len(responses)%stride != 0 { - return nil, errors.New("List length is not divisible by 4") - } - for i := 0; i < len(responses); i += stride { - dimensionName := responses[i+0] - datasetEdition := responses[i+1] - versionStr := responses[i+2] - datasetID := responses[i+3] - responseRecords = append(responseRecords, []string{dimensionName, datasetEdition, versionStr, datasetID}) - } - return responseRecords, nil -} - -// These (nested) maps track the latest version cited by any combination -// of dimensionName, dataset edition, and datasetID. -// They are all keyed on strings and the nested assembly can be accessed -// like this: -// latestVersion = foo[datasetID][dimension][edition] - -type editionToLatestVersion map[string]int -type dim2Edition map[string]editionToLatestVersion -type datasetID2Dim map[string]dim2Edition - -/* -buildLatestVersionMaps consumes a list of records such as -["dimName1", "datasetEdition1", "version4", "datasetID3"], and builds a datasetID2Dim -structure based on the latest versions available for each combination of -dimension name, dataset edition, and datasetID. 
-*/ -func buildLatestVersionMaps(responseRecords [][]string) (datasetID2Dim, error) { - did2Dim := datasetID2Dim{} - - for _, record := range responseRecords { - dimensionName := record[0] - datasetEdition := record[1] - versionStr := record[2] - datasetID := record[3] - - versionInt, err := strconv.Atoi(versionStr) - if err != nil { - return nil, errors.Wrapf(err, "Cannot cast version (%q) to int", versionStr) - } - if _, ok := did2Dim[datasetID]; !ok { - did2Dim[datasetID] = dim2Edition{} - } - if _, ok := did2Dim[datasetID][dimensionName]; !ok { - did2Dim[datasetID][dimensionName] = editionToLatestVersion{} - } - latestKnownV, ok := did2Dim[datasetID][dimensionName][datasetEdition] - if !ok || latestKnownV < versionInt { - did2Dim[datasetID][dimensionName][datasetEdition] = versionInt - } - } - return did2Dim, nil -} - -/* -buildResponse is capable of consuming a datasetID2Dim data structure, along -with a few other query parameters, and from these, building the data -structure model hierchy required by the GetCodeDatasets API method. 
-*/ -func buildResponse(did2Dim datasetID2Dim, code string, codeListID string) *models.Datasets { - datasets := &models.Datasets{ - Items: []models.Dataset{}, - Count: len(did2Dim), - Limit: len(did2Dim), - TotalCount: len(did2Dim), - } - for datasetID, dim2E := range did2Dim { - for dimensionName, e2v := range dim2E { - datasetLinks := &models.DatasetLinks{Self: &models.Link{ID: datasetID}} - dataset := models.Dataset{ - Links: datasetLinks, - DimensionLabel: dimensionName, - Editions: []models.DatasetEdition{}, - } - for datasetEdition, version := range e2v { - versionStr := fmt.Sprintf("%d", version) - edition := models.DatasetEdition{} - edition.Links = &models.DatasetEditionLinks{ - Self: &models.Link{ID: datasetEdition}, - LatestVersion: &models.Link{ID: versionStr}, - DatasetDimension: &models.Link{ID: codeListID}, - } - dataset.Editions = append(dataset.Editions, edition) - } - datasets.Items = append(datasets.Items, dataset) - } - } - return datasets -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go b/vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go deleted file mode 100644 index 0cbe5b8f..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/dimension.go +++ /dev/null @@ -1,35 +0,0 @@ -package neptune - -import ( - "context" - "fmt" - - "github.com/ONSdigital/dp-dimension-importer/model" - "github.com/ONSdigital/dp-graph/neptune/query" -) - -// InsertDimension node to neptune and create relationships to the instance node. -// Where nodes and relationships already exist, ensure they are upserted. 
-func (n *NeptuneDB) InsertDimension(ctx context.Context, uniqueDimensions map[string]string, i *model.Instance, d *model.Dimension) (*model.Dimension, error) { - if err := i.Validate(); err != nil { - return nil, err - } - if err := d.Validate(); err != nil { - return nil, err - } - - dimensionLabel := fmt.Sprintf("_%s_%s", i.InstanceID, d.DimensionID) - - res, err := n.getVertex(fmt.Sprintf(query.CreateDimensionToInstanceRelationship, i.InstanceID, d.DimensionID, d.Option, i.InstanceID, d.DimensionID, d.Option, i.InstanceID)) - if err != nil { - return nil, err - } - - d.NodeID = res.GetID() - - if _, ok := uniqueDimensions[dimensionLabel]; !ok { - uniqueDimensions[dimensionLabel] = dimensionLabel - i.AddDimension(d) - } - return d, nil -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/driver.go b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/driver.go deleted file mode 100644 index d3fd7426..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/driver.go +++ /dev/null @@ -1,23 +0,0 @@ -package driver - -import ( - "context" - - gremgo "github.com/ONSdigital/gremgo-neptune" -) - -type NeptuneDriver struct { - Pool NeptunePool // Defined with an interface to support mocking. 
-} - -func New(ctx context.Context, dbAddr string, errs chan error) (*NeptuneDriver, error) { - pool := gremgo.NewPoolWithDialerCtx(ctx, dbAddr, errs) - return &NeptuneDriver{ - Pool: pool, - }, nil -} - -func (n *NeptuneDriver) Close(ctx context.Context) error { - n.Pool.Close() - return nil -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/healthcheck.go b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/healthcheck.go deleted file mode 100644 index 7c060706..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/healthcheck.go +++ /dev/null @@ -1,14 +0,0 @@ -package driver - -const ( - serviceName = "neptune" - pingStmt = "g.V().limit(1)" -) - -// Healthcheck calls neptune to check its health status -func (n *NeptuneDriver) Healthcheck() (s string, err error) { - if _, err = n.Pool.Get(pingStmt, nil, nil); err != nil { - return serviceName, err - } - return serviceName, nil -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go b/vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go deleted file mode 100644 index f053dd51..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/driver/neptunepool.go +++ /dev/null @@ -1,24 +0,0 @@ -package driver - -import ( - "context" - - "github.com/ONSdigital/graphson" - gremgo "github.com/ONSdigital/gremgo-neptune" -) - -//go:generate moq -out ../internal/pool.go -pkg internal . NeptunePool - -/* -NeptunePool defines the contract required of the gremgo -connection Pool by the Neptune.Driver. 
-*/ -type NeptunePool interface { - Close() - Execute(query string, bindings, rebindings map[string]string) (resp []gremgo.Response, err error) - Get(query string, bindings, rebindings map[string]string) ([]graphson.Vertex, error) - GetCount(q string, bindings, rebindings map[string]string) (i int64, err error) - GetE(q string, bindings, rebindings map[string]string) (resp interface{}, err error) - OpenStreamCursor(ctx context.Context, query string, bindings, rebindings map[string]string) (stream *gremgo.Stream, err error) - GetStringList(query string, bindings, rebindings map[string]string) (vals []string, err error) -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go b/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go deleted file mode 100644 index aebaa482..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/hierarchy.go +++ /dev/null @@ -1,296 +0,0 @@ -package neptune - -import ( - "context" - "fmt" - - "github.com/ONSdigital/dp-graph/graph/driver" - "github.com/ONSdigital/dp-graph/neptune/query" - "github.com/ONSdigital/dp-hierarchy-api/models" - "github.com/ONSdigital/go-ns/log" - "github.com/ONSdigital/graphson" - "github.com/pkg/errors" -) - -func (n *NeptuneDB) CreateInstanceHierarchyConstraints(ctx context.Context, attempt int, instanceID, dimensionName string) error { - return errors.New("method not supported: CreateInstanceHierarchyConstraints") -} - -func (n *NeptuneDB) CloneNodes(ctx context.Context, attempt int, instanceID, codeListID, dimensionName string) (err error) { - gremStmt := fmt.Sprintf( - query.CloneHierarchyNodes, - codeListID, - instanceID, - dimensionName, - codeListID, - ) - logData := log.Data{"fn": "CloneNodes", - "gremlin": gremStmt, - "instance_id": instanceID, - "code_list_id": codeListID, - "dimension_name": dimensionName, - } - log.Debug("cloning nodes from the generic hierarchy", logData) - - if _, err = 
n.getVertices(gremStmt); err != nil { - log.ErrorC("cannot get vertices during cloning", err, logData) - return - } - - return -} - -func (n *NeptuneDB) CountNodes(ctx context.Context, instanceID, dimensionName string) (count int64, err error) { - gremStmt := fmt.Sprintf(query.CountHierarchyNodes, instanceID, dimensionName) - logData := log.Data{ - "fn": "CountNodes", - "gremlin": gremStmt, - "instance_id": instanceID, - "dimension_name": dimensionName, - } - log.Debug("counting nodes in the new instance hierarchy", logData) - - if count, err = n.getNumber(gremStmt); err != nil { - log.ErrorC("cannot count nodes in a hierarchy", err, logData) - return - } - return -} - -func (n *NeptuneDB) CloneRelationships(ctx context.Context, attempt int, instanceID, codeListID, dimensionName string) (err error) { - gremStmt := fmt.Sprintf( - query.CloneHierarchyRelationships, - codeListID, - instanceID, - dimensionName, - instanceID, - dimensionName, - ) - logData := log.Data{ - "fn": "CloneRelationships", - "instance_id": instanceID, - "code_list_id": codeListID, - "dimension_name": dimensionName, - "gremlin": gremStmt, - } - log.Debug("cloning relationships from the generic hierarchy", logData) - - if _, err = n.getEdges(gremStmt); err != nil { - log.ErrorC("cannot find edges while cloning relationships", err, logData) - return - } - - return n.RemoveCloneEdges(ctx, attempt, instanceID, dimensionName) -} - -func (n *NeptuneDB) RemoveCloneEdges(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { - gremStmt := fmt.Sprintf( - query.RemoveCloneMarkers, - instanceID, - dimensionName, - ) - logData := log.Data{ - "fn": "RemoveCloneEdges", - "instance_id": instanceID, - "dimension_name": dimensionName, - "gremlin": gremStmt, - } - log.Debug("removing edges to generic hierarchy", logData) - - if _, err = n.exec(gremStmt); err != nil { - log.ErrorC("exec failed while removing edges during removal of unwanted cloned edges", err, logData) - return - } - 
return -} - -func (n *NeptuneDB) SetNumberOfChildren(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { - gremStmt := fmt.Sprintf( - query.SetNumberOfChildren, - instanceID, - dimensionName, - ) - - logData := log.Data{ - "fn": "SetNumberOfChildren", - "instance_id": instanceID, - "dimension_name": dimensionName, - "gremlin": gremStmt, - } - - log.Debug("setting number-of-children property value on the instance hierarchy nodes", logData) - - if _, err = n.getVertices(gremStmt); err != nil { - log.ErrorC("cannot find vertices while settting nChildren on hierarchy nodes", err, logData) - return - } - - return -} - -func (n *NeptuneDB) SetHasData(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { - gremStmt := fmt.Sprintf( - query.SetHasData, - instanceID, - dimensionName, - instanceID, - dimensionName, - ) - - logData := log.Data{ - "instance_id": instanceID, - "dimension_name": dimensionName, - "gremlin": gremStmt, - } - - log.Debug("setting has-data property on the instance hierarchy", logData) - - if _, err = n.getVertices(gremStmt); err != nil { - log.ErrorC("cannot find vertices while setting hasData on hierarchy nodes", err, logData) - return - } - - return -} - -func (n *NeptuneDB) MarkNodesToRemain(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { - gremStmt := fmt.Sprintf(query.MarkNodesToRemain, - instanceID, - dimensionName, - // instanceID, - // dimensionName, - ) - - logData := log.Data{ - "instance_id": instanceID, - "dimension_name": dimensionName, - "gremlin": gremStmt, - } - - log.Debug("marking nodes to remain after trimming sparse branches", logData) - - if _, err = n.getVertices(gremStmt); err != nil { - log.ErrorC("cannot find vertices while marking hierarchy nodes to keep", err, logData) - return - } - - return -} - -func (n *NeptuneDB) RemoveNodesNotMarkedToRemain(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { - 
gremStmt := fmt.Sprintf(query.RemoveNodesNotMarkedToRemain, instanceID, dimensionName) - logData := log.Data{ - "instance_id": instanceID, - "dimension_name": dimensionName, - "gremlin": gremStmt, - } - - log.Debug("removing nodes not marked to remain after trimming sparse branches", logData) - - if _, err = n.exec(gremStmt); err != nil { - log.ErrorC("exec query failed while removing hierarchy nodes to cull", err, logData) - return - } - return -} - -func (n *NeptuneDB) RemoveRemainMarker(ctx context.Context, attempt int, instanceID, dimensionName string) (err error) { - gremStmt := fmt.Sprintf(query.RemoveRemainMarker, instanceID, dimensionName) - logData := log.Data{ - "fn": "RemoveRemainMarker", - "gremlin": gremStmt, - "instance_id": instanceID, - "dimension_name": dimensionName, - } - log.Debug("removing the remain property from the nodes that remain", logData) - - if _, err = n.exec(gremStmt); err != nil { - log.ErrorC("exec query failed while removing spent remain markers from hierarchy nodes", err, logData) - return - } - return -} - -func (n *NeptuneDB) GetHierarchyCodelist(ctx context.Context, instanceID, dimension string) (codelistID string, err error) { - gremStmt := fmt.Sprintf(query.HierarchyExists, instanceID, dimension) - logData := log.Data{ - "fn": "GetHierarchyCodelist", - "gremlin": gremStmt, - "instance_id": instanceID, - "dimension_name": dimension, - } - - var vertex graphson.Vertex - if vertex, err = n.getVertex(gremStmt); err != nil { - log.ErrorC("cannot get vertices while searching for code list node related to hierarchy node", err, logData) - return - } - if codelistID, err = vertex.GetProperty("code_list"); err != nil { - log.ErrorC("cannot read code_list property from node", err, logData) - return - } - return -} - -func (n *NeptuneDB) GetHierarchyRoot(ctx context.Context, instanceID, dimension string) (node *models.Response, err error) { - gremStmt := fmt.Sprintf(query.GetHierarchyRoot, instanceID, dimension) - logData := log.Data{ - 
"fn": "GetHierarchyRoot", - "gremlin": gremStmt, - "instance_id": instanceID, - "dimension_name": dimension, - } - - var vertices []graphson.Vertex - if vertices, err = n.getVertices(gremStmt); err != nil { - log.ErrorC("getVertices failed: cannot find hierarchy root node candidates ", err, logData) - return - } - if len(vertices) == 0 { - err = driver.ErrNotFound - log.ErrorC("Cannot find hierarchy root node", err, logData) - return - } - if len(vertices) > 1 { - err = driver.ErrMultipleFound - log.ErrorC("Cannot identify hierarchy root node because are multiple candidates", err, logData) - return - } - var vertex graphson.Vertex - vertex = vertices[0] - // Note the call to buildHierarchyNodeFromGraphsonVertex below does much more than meets the eye, - // including launching new queries in of itself to fetch child nodes, and - // breadcrumb nodes. - wantBreadcrumbs := false // Because meaningless for a root node - if node, err = n.buildHierarchyNodeFromGraphsonVertex(vertex, instanceID, dimension, wantBreadcrumbs); err != nil { - log.ErrorC("Cannot extract related information needed from hierarchy node", err, logData) - return - } - return -} - -func (n *NeptuneDB) GetHierarchyElement(ctx context.Context, instanceID, dimension, code string) (node *models.Response, err error) { - gremStmt := fmt.Sprintf(query.GetHierarchyElement, instanceID, dimension, code) - logData := log.Data{ - "fn": "GetHierarchyElement", - "gremlin": gremStmt, - "instance_id": instanceID, - "code_list_id": code, - "dimension_name": dimension, - } - - var vertex graphson.Vertex - if vertex, err = n.getVertex(gremStmt); err != nil { - log.ErrorC("Cannot find vertex when looking for specific hierarchy node", err, logData) - return - } - // Note the call to buildHierarchyNodeFromGraphsonVertex below does much more than meets the eye, - // including launching new queries in of itself to fetch child nodes, and - // breadcrumb nodes. 
- wantBreadcrumbs := true // Because we are at depth in the hierarchy - if node, err = n.buildHierarchyNodeFromGraphsonVertex(vertex, instanceID, dimension, wantBreadcrumbs); err != nil { - log.ErrorC("Cannot extract related information needed from hierarchy node", err, logData) - return - } - return -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/instance.go b/vendor/github.com/ONSdigital/dp-graph/neptune/instance.go deleted file mode 100644 index b10472dc..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/instance.go +++ /dev/null @@ -1,162 +0,0 @@ -package neptune - -import ( - "context" - "fmt" - "strings" - - "github.com/ONSdigital/dp-dimension-importer/model" - "github.com/ONSdigital/dp-graph/neptune/query" - "github.com/ONSdigital/go-ns/log" - gremgo "github.com/ONSdigital/gremgo-neptune" - "github.com/pkg/errors" -) - -const codeListNotFoundFmt = "VertexStep(OUT,[usedBy],vertex), HasStep([~label.eq(_code_list_%s)" - -// CountInsertedObservations returns the current number of observations relating to a given instance -func (n *NeptuneDB) CountInsertedObservations(ctx context.Context, instanceID string) (count int64, err error) { - return n.getNumber(fmt.Sprintf(query.CountObservations, instanceID)) -} - -// AddVersionDetailsToInstance updates an instance node to contain details of which -// dataset, edition and version the instance will also be known by -func (n *NeptuneDB) AddVersionDetailsToInstance(ctx context.Context, instanceID string, datasetID string, edition string, version int) error { - data := log.Data{ - "instance_id": instanceID, - "dataset_id": datasetID, - "edition": edition, - "version": version, - } - - q := fmt.Sprintf(query.AddVersionDetailsToInstance, instanceID, datasetID, edition, version) - - if _, err := n.exec(q); err != nil { - log.ErrorC("neptune exec failed on AddVersionDetailsToInstance", err, data) - return err - } - return nil -} - -// 
SetInstanceIsPublished sets a flag on an instance node to indicate the published state -func (n *NeptuneDB) SetInstanceIsPublished(ctx context.Context, instanceID string) error { - data := log.Data{ - "instance_id": instanceID, - } - - q := fmt.Sprintf(query.SetInstanceIsPublished, instanceID) - - if _, err := n.exec(q); err != nil { - log.ErrorC("neptune exec failed on SetInstanceIsPublished", err, data) - return err - } - return nil -} - -// CreateInstanceConstraint is not needed for the neptune implementation, as constraints are -// not a neptune construct -func (n *NeptuneDB) CreateInstanceConstraint(ctx context.Context, i *model.Instance) error { - return errors.New("method not supported: CreateInstanceConstraint") -} - -// CreateInstance will check if an instance node already exists and create one from -// the provided details if one does not exist -func (n *NeptuneDB) CreateInstance(ctx context.Context, i *model.Instance) error { - if err := i.Validate(); err != nil { - return err - } - - data := log.Data{ - "instance_id": i.InstanceID, - } - - exists, err := n.InstanceExists(ctx, i) - if err != nil { - return err - } - - if exists { - log.Info("instance already exists in neptune", data) - return nil - } - - create := fmt.Sprintf(query.CreateInstance, i.InstanceID, strings.Join(i.CSVHeader, ",")) - if _, err := n.exec(create); err != nil { - log.ErrorC("neptune exec failed on CreateInstance", err, data) - return err - } - return nil -} - -// AddDimensions list to the specified instance node -func (n *NeptuneDB) AddDimensions(ctx context.Context, i *model.Instance) error { - if err := i.Validate(); err != nil { - return err - } - - data := log.Data{ - "instance_id": i.InstanceID, - } - - q := fmt.Sprintf(query.AddInstanceDimensionsPart, i.InstanceID) - for _, d := range i.Dimensions { - q += fmt.Sprintf(query.AddInstanceDimensionsPropertyPart, d.(string)) - } - - if _, err := n.exec(q); err != nil { - log.ErrorC("neptune exec failed on AddDimensions", err, 
data) - return err - } - - return nil -} - -// CreateCodeRelationship links an instance to a code for the given dimension option -func (n *NeptuneDB) CreateCodeRelationship(ctx context.Context, i *model.Instance, codeListID, code string) error { - if err := i.Validate(); err != nil { - return err - } - - if len(code) == 0 { - return errors.New("error creating relationship from instance to code: code is required but was empty") - } - - data := log.Data{ - "instance_id": i.InstanceID, - "code_list": codeListID, - "code": code, - } - - createRelationships := fmt.Sprintf(query.CreateInstanceToCodeRelationship, i.InstanceID, code, codeListID) - if res, err := n.exec(createRelationships); err != nil { - if len(res) > 0 && res[0].Status.Code == gremgo.StatusScriptEvaluationError && - strings.Contains(res[0].Status.Message, fmt.Sprintf(codeListNotFoundFmt, codeListID)) { - - return errors.Wrapf(err, "error creating relationship from instance to code: code or code list not found", data) - } - log.ErrorC("neptune exec failed on CreateCodeRelationship", err, data) - return err - } - - return nil -} - -// InstanceExists returns true if an instance already exists with the provided id -func (n *NeptuneDB) InstanceExists(ctx context.Context, i *model.Instance) (bool, error) { - data := log.Data{ - "instance_id": i.InstanceID, - } - - exists := fmt.Sprintf(query.CheckInstance, i.InstanceID) - count, err := n.getNumber(exists) - if err != nil { - log.ErrorC("neptune getNumber failed to check if instance exists", err, data) - return false, err - } - - if count == 0 { - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go deleted file mode 100644 index 5514aa2a..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/mockpoolutils.go +++ /dev/null @@ -1,184 +0,0 @@ -package internal - -import ( - 
"fmt" - - "github.com/ONSdigital/graphson" -) - -/* -This module provides a handful of mock convenience functions that can be -used to inject behaviour into NeptunePoolMock. -*/ - -import ( - "errors" -) - -// ReturnOne is a mock implementation for NeptunePool.GetCount() -// that always returns a count of 1. -var ReturnOne = func(q string, bindings, rebindings map[string]string) (i int64, err error) { - return 1, nil -} - -// ReturnTwo is a mock implementation for NeptunePool.GetCount() -// that always returns a count of 2. -var ReturnTwo = func(q string, bindings, rebindings map[string]string) (i int64, err error) { - return 2, nil -} - -// ReturnZero is a mock implementation for NeptunePool.GetCount() -// that always returns a count of 0. -var ReturnZero = func(q string, bindings, rebindings map[string]string) (i int64, err error) { - return 0, nil -} - -// ReturnMalformedIntRequestErr is a mock implementation for NeptunePool.GetCount() -// that always returns an error that is judged to be not transient by -// neptune.isTransientError -var ReturnMalformedIntRequestErr = func(q string, bindings, rebindings map[string]string) (i int64, err error) { - return -1, errors.New(" MALFORMED REQUEST ") -} - -// ReturnMalformedNilInterfaceRequestErr is a mock implementation for -// NeptunePool functions that return ([]graphson.Vertex, error) which always returns an -// error that is judged to be not transient by neptune.isTransientError -var ReturnMalformedNilInterfaceRequestErr = func(q string, bindings, rebindings map[string]string) ([]graphson.Vertex, error) { - return nil, errors.New(" MALFORMED REQUEST ") -} - -// ReturnMalformedStringListRequestErr is a mock implementation for -// NeptunePool functions that return ([]string, error) which always returns an -// error that is judged to be not transient by neptune.isTransientError -var ReturnMalformedStringListRequestErr = func(q string, bindings, rebindings map[string]string) ([]string, error) { - return nil, 
errors.New(" MALFORMED REQUEST ") -} - -// ReturnThreeCodeLists is mock implementation for NeptunePool.Get() that always -// returns a slice of three graphson.Vertex(s): -// - of type "_code_list" -// - with a "listID" property set to "listID_0", "listID_1", and "ListID_2" respectively. -// - with an "edition" property set to "my-test-edition" -var ReturnThreeCodeLists = func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { - codeLists := []graphson.Vertex{} - for i := 0; i < 3; i++ { - vertex := makeCodeListVertex(i, "my-test-edition") - codeLists = append(codeLists, vertex) - } - return codeLists, nil -} - -// ReturnThreeEditionVertices is mock implementation for NeptunePool.Get() that always -// returns a slice of three graphson.Vertex(s): -// - of type "unused-vertex-type" -// - with a an "edition" property set to "edition_0", "edition_1", and "edition_2" respectively. -var ReturnThreeEditionVertices = func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { - editions := []graphson.Vertex{} - for i := 0; i < 3; i++ { - vertex := makeVertex("unused-vertex-type") - setVertexStringProperty(&vertex, "edition", fmt.Sprintf("edition_%d", i)) - editions = append(editions, vertex) - } - return editions, nil -} - -// ReturnThreeCodeVertices is mock implementation for NeptunePool.Get() that always -// returns a slice of three graphson.Vertex(s): -// - of type "unused-vertex-type" -// - with a "value" property set to "code_0", "code_1", and "code_2" respectively. 
-var ReturnThreeCodeVertices = func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { - codes := []graphson.Vertex{} - for i := 0; i < 3; i++ { - vertex := makeVertex("unused-vertex-type") - setVertexStringProperty(&vertex, "value", fmt.Sprintf("code_%d", i)) - codes = append(codes, vertex) - } - return codes, nil -} - -// ReturnThreeUselessVertices is mock implementation for NeptunePool.Get() that always -// returns a slice of three graphson.Vertex(s) of type "_useless_vertex_type", and with -// no properties set. -var ReturnThreeUselessVertices = func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { - codeLists := []graphson.Vertex{} - for i := 0; i < 3; i++ { - vertex := makeVertex("_useless_vertex_type") - codeLists = append(codeLists, vertex) - } - return codeLists, nil -} - -// ReturnZeroVertices provides an empty list of graphson.Vertex(s) -var ReturnZeroVertices = func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { - return []graphson.Vertex{}, nil -} - -/* -makeVertex makes a graphson.Vertex of a given type (e.g. "_code_list"). -*/ -func makeVertex(vertexType string) graphson.Vertex { - vertexValue := graphson.VertexValue{ - ID: "unused_vertex_value_ID", - Label: vertexType, - Properties: map[string][]graphson.VertexProperty{}, - } - vertex := graphson.Vertex{Type: vertexType, Value: vertexValue} - return vertex -} - -/* -setVertexTypedProperty sets the given key/polymorphic-value to a vertex. -The "theType" parameter must be "string" or "int". 
-*/ -func setVertexTypedProperty(theType string, vertex *graphson.Vertex, key string, value interface{}) { - gv := graphson.GenericValue{Type: "string", Value: key} - pv := graphson.VertexPropertyValue{ - ID: gv, - Label: key, - Value: value, - } - vertexProperty := graphson.VertexProperty{Type: theType, Value: pv} - vertexProperties := []graphson.VertexProperty{vertexProperty} - vertex.Value.Properties[key] = vertexProperties -} - -// setVertexStringProperty sets the given key/value in a vertex. -func setVertexStringProperty(vertex *graphson.Vertex, key string, value interface{}) { - setVertexTypedProperty("string", vertex, key, value) -} - -// setVertexIntProperty sets the given key/value in a vertex. -func setVertexIntProperty(vertex *graphson.Vertex, key string, value int) { - setVertexTypedProperty("int", vertex, key, value) -} - -// makeCodeListVertex provides a graphson.Vertex with a vertex type of the -// form "_code_list", and a "listID" property of the form "listID_3". -// It is also given an "edition" property with the supplied value. -func makeCodeListVertex(listIDSuffix int, edition string) graphson.Vertex { - v := makeVertex("_code_list") - setVertexStringProperty(&v, "listID", fmt.Sprintf("listID_%d", listIDSuffix)) - setVertexStringProperty(&v, "edition", edition) - return v -} - -// ReturnFiveStrings is a mock implementation for -// NeptunePool functions that return ([]string, error) which always returns -// five strings. -var ReturnFiveStrings = func(q string, bindings, rebindings map[string]string) ([]string, error) { - return []string{"a", "b", "c", "d", "e"}, nil -} - -// ReturnStringRecordWithNonIntegerFourthElement is a mock implementation for -// NeptunePool functions that return ([]string, error) which always returns -// 4 strings - in which the third one cannot be cast to an integer. 
-var ReturnStringRecordWithNonIntegerFourthElement = func(q string, bindings, rebindings map[string]string) ([]string, error) { - return []string{"1", "2", "fibble", "3"}, nil -} - -// ReturnProperlyFormedDatasetRecord is a mock implementation for -// NeptunePool functions that return ([]string, error) which always returns -// A single quartet of strings that should satisfy the GetCodeDatasets method. -var ReturnProperlyFormedDatasetRecord = func(q string, bindings, rebindings map[string]string) ([]string, error) { - return []string{"exampleDimName", "exampleDatasetEdition", "3", "exampleDatasetID"}, nil -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go b/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go deleted file mode 100644 index 17bc839b..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/internal/pool.go +++ /dev/null @@ -1,409 +0,0 @@ -// Code generated by moq; DO NOT EDIT. -// github.com/matryer/moq - -package internal - -import ( - "context" - "github.com/ONSdigital/dp-graph/neptune/driver" - "github.com/ONSdigital/graphson" - "github.com/ONSdigital/gremgo-neptune" - "sync" -) - -var ( - lockNeptunePoolMockClose sync.RWMutex - lockNeptunePoolMockExecute sync.RWMutex - lockNeptunePoolMockGet sync.RWMutex - lockNeptunePoolMockGetCount sync.RWMutex - lockNeptunePoolMockGetE sync.RWMutex - lockNeptunePoolMockGetStringList sync.RWMutex - lockNeptunePoolMockOpenStreamCursor sync.RWMutex -) - -// Ensure, that NeptunePoolMock does implement NeptunePool. -// If this is not the case, regenerate this file with moq. -var _ driver.NeptunePool = &NeptunePoolMock{} - -// NeptunePoolMock is a mock implementation of NeptunePool. 
-// -// func TestSomethingThatUsesNeptunePool(t *testing.T) { -// -// // make and configure a mocked NeptunePool -// mockedNeptunePool := &NeptunePoolMock{ -// CloseFunc: func() { -// panic("mock out the Close method") -// }, -// ExecuteFunc: func(query string, bindings map[string]string, rebindings map[string]string) ([]gremgo.Response, error) { -// panic("mock out the Execute method") -// }, -// GetFunc: func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { -// panic("mock out the Get method") -// }, -// GetCountFunc: func(q string, bindings map[string]string, rebindings map[string]string) (int64, error) { -// panic("mock out the GetCount method") -// }, -// GetEFunc: func(q string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { -// panic("mock out the GetE method") -// }, -// GetStringListFunc: func(query string, bindings map[string]string, rebindings map[string]string) ([]string, error) { -// panic("mock out the GetStringList method") -// }, -// OpenStreamCursorFunc: func(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Stream, error) { -// panic("mock out the OpenStreamCursor method") -// }, -// } -// -// // use mockedNeptunePool in code that requires NeptunePool -// // and then make assertions. -// -// } -type NeptunePoolMock struct { - // CloseFunc mocks the Close method. - CloseFunc func() - - // ExecuteFunc mocks the Execute method. - ExecuteFunc func(query string, bindings map[string]string, rebindings map[string]string) ([]gremgo.Response, error) - - // GetFunc mocks the Get method. - GetFunc func(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) - - // GetCountFunc mocks the GetCount method. - GetCountFunc func(q string, bindings map[string]string, rebindings map[string]string) (int64, error) - - // GetEFunc mocks the GetE method. 
- GetEFunc func(q string, bindings map[string]string, rebindings map[string]string) (interface{}, error) - - // GetStringListFunc mocks the GetStringList method. - GetStringListFunc func(query string, bindings map[string]string, rebindings map[string]string) ([]string, error) - - // OpenStreamCursorFunc mocks the OpenStreamCursor method. - OpenStreamCursorFunc func(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Stream, error) - - // calls tracks calls to the methods. - calls struct { - // Close holds details about calls to the Close method. - Close []struct { - } - // Execute holds details about calls to the Execute method. - Execute []struct { - // Query is the query argument value. - Query string - // Bindings is the bindings argument value. - Bindings map[string]string - // Rebindings is the rebindings argument value. - Rebindings map[string]string - } - // Get holds details about calls to the Get method. - Get []struct { - // Query is the query argument value. - Query string - // Bindings is the bindings argument value. - Bindings map[string]string - // Rebindings is the rebindings argument value. - Rebindings map[string]string - } - // GetCount holds details about calls to the GetCount method. - GetCount []struct { - // Q is the q argument value. - Q string - // Bindings is the bindings argument value. - Bindings map[string]string - // Rebindings is the rebindings argument value. - Rebindings map[string]string - } - // GetE holds details about calls to the GetE method. - GetE []struct { - // Q is the q argument value. - Q string - // Bindings is the bindings argument value. - Bindings map[string]string - // Rebindings is the rebindings argument value. - Rebindings map[string]string - } - // GetStringList holds details about calls to the GetStringList method. - GetStringList []struct { - // Query is the query argument value. - Query string - // Bindings is the bindings argument value. 
- Bindings map[string]string - // Rebindings is the rebindings argument value. - Rebindings map[string]string - } - // OpenStreamCursor holds details about calls to the OpenStreamCursor method. - OpenStreamCursor []struct { - // Ctx is the ctx argument value. - Ctx context.Context - // Query is the query argument value. - Query string - // Bindings is the bindings argument value. - Bindings map[string]string - // Rebindings is the rebindings argument value. - Rebindings map[string]string - } - } -} - -// Close calls CloseFunc. -func (mock *NeptunePoolMock) Close() { - if mock.CloseFunc == nil { - panic("NeptunePoolMock.CloseFunc: method is nil but NeptunePool.Close was just called") - } - callInfo := struct { - }{} - lockNeptunePoolMockClose.Lock() - mock.calls.Close = append(mock.calls.Close, callInfo) - lockNeptunePoolMockClose.Unlock() - mock.CloseFunc() -} - -// CloseCalls gets all the calls that were made to Close. -// Check the length with: -// len(mockedNeptunePool.CloseCalls()) -func (mock *NeptunePoolMock) CloseCalls() []struct { -} { - var calls []struct { - } - lockNeptunePoolMockClose.RLock() - calls = mock.calls.Close - lockNeptunePoolMockClose.RUnlock() - return calls -} - -// Execute calls ExecuteFunc. -func (mock *NeptunePoolMock) Execute(query string, bindings map[string]string, rebindings map[string]string) ([]gremgo.Response, error) { - if mock.ExecuteFunc == nil { - panic("NeptunePoolMock.ExecuteFunc: method is nil but NeptunePool.Execute was just called") - } - callInfo := struct { - Query string - Bindings map[string]string - Rebindings map[string]string - }{ - Query: query, - Bindings: bindings, - Rebindings: rebindings, - } - lockNeptunePoolMockExecute.Lock() - mock.calls.Execute = append(mock.calls.Execute, callInfo) - lockNeptunePoolMockExecute.Unlock() - return mock.ExecuteFunc(query, bindings, rebindings) -} - -// ExecuteCalls gets all the calls that were made to Execute. 
-// Check the length with: -// len(mockedNeptunePool.ExecuteCalls()) -func (mock *NeptunePoolMock) ExecuteCalls() []struct { - Query string - Bindings map[string]string - Rebindings map[string]string -} { - var calls []struct { - Query string - Bindings map[string]string - Rebindings map[string]string - } - lockNeptunePoolMockExecute.RLock() - calls = mock.calls.Execute - lockNeptunePoolMockExecute.RUnlock() - return calls -} - -// Get calls GetFunc. -func (mock *NeptunePoolMock) Get(query string, bindings map[string]string, rebindings map[string]string) ([]graphson.Vertex, error) { - if mock.GetFunc == nil { - panic("NeptunePoolMock.GetFunc: method is nil but NeptunePool.Get was just called") - } - callInfo := struct { - Query string - Bindings map[string]string - Rebindings map[string]string - }{ - Query: query, - Bindings: bindings, - Rebindings: rebindings, - } - lockNeptunePoolMockGet.Lock() - mock.calls.Get = append(mock.calls.Get, callInfo) - lockNeptunePoolMockGet.Unlock() - return mock.GetFunc(query, bindings, rebindings) -} - -// GetCalls gets all the calls that were made to Get. -// Check the length with: -// len(mockedNeptunePool.GetCalls()) -func (mock *NeptunePoolMock) GetCalls() []struct { - Query string - Bindings map[string]string - Rebindings map[string]string -} { - var calls []struct { - Query string - Bindings map[string]string - Rebindings map[string]string - } - lockNeptunePoolMockGet.RLock() - calls = mock.calls.Get - lockNeptunePoolMockGet.RUnlock() - return calls -} - -// GetCount calls GetCountFunc. 
-func (mock *NeptunePoolMock) GetCount(q string, bindings map[string]string, rebindings map[string]string) (int64, error) { - if mock.GetCountFunc == nil { - panic("NeptunePoolMock.GetCountFunc: method is nil but NeptunePool.GetCount was just called") - } - callInfo := struct { - Q string - Bindings map[string]string - Rebindings map[string]string - }{ - Q: q, - Bindings: bindings, - Rebindings: rebindings, - } - lockNeptunePoolMockGetCount.Lock() - mock.calls.GetCount = append(mock.calls.GetCount, callInfo) - lockNeptunePoolMockGetCount.Unlock() - return mock.GetCountFunc(q, bindings, rebindings) -} - -// GetCountCalls gets all the calls that were made to GetCount. -// Check the length with: -// len(mockedNeptunePool.GetCountCalls()) -func (mock *NeptunePoolMock) GetCountCalls() []struct { - Q string - Bindings map[string]string - Rebindings map[string]string -} { - var calls []struct { - Q string - Bindings map[string]string - Rebindings map[string]string - } - lockNeptunePoolMockGetCount.RLock() - calls = mock.calls.GetCount - lockNeptunePoolMockGetCount.RUnlock() - return calls -} - -// GetE calls GetEFunc. -func (mock *NeptunePoolMock) GetE(q string, bindings map[string]string, rebindings map[string]string) (interface{}, error) { - if mock.GetEFunc == nil { - panic("NeptunePoolMock.GetEFunc: method is nil but NeptunePool.GetE was just called") - } - callInfo := struct { - Q string - Bindings map[string]string - Rebindings map[string]string - }{ - Q: q, - Bindings: bindings, - Rebindings: rebindings, - } - lockNeptunePoolMockGetE.Lock() - mock.calls.GetE = append(mock.calls.GetE, callInfo) - lockNeptunePoolMockGetE.Unlock() - return mock.GetEFunc(q, bindings, rebindings) -} - -// GetECalls gets all the calls that were made to GetE. 
-// Check the length with: -// len(mockedNeptunePool.GetECalls()) -func (mock *NeptunePoolMock) GetECalls() []struct { - Q string - Bindings map[string]string - Rebindings map[string]string -} { - var calls []struct { - Q string - Bindings map[string]string - Rebindings map[string]string - } - lockNeptunePoolMockGetE.RLock() - calls = mock.calls.GetE - lockNeptunePoolMockGetE.RUnlock() - return calls -} - -// GetStringList calls GetStringListFunc. -func (mock *NeptunePoolMock) GetStringList(query string, bindings map[string]string, rebindings map[string]string) ([]string, error) { - if mock.GetStringListFunc == nil { - panic("NeptunePoolMock.GetStringListFunc: method is nil but NeptunePool.GetStringList was just called") - } - callInfo := struct { - Query string - Bindings map[string]string - Rebindings map[string]string - }{ - Query: query, - Bindings: bindings, - Rebindings: rebindings, - } - lockNeptunePoolMockGetStringList.Lock() - mock.calls.GetStringList = append(mock.calls.GetStringList, callInfo) - lockNeptunePoolMockGetStringList.Unlock() - return mock.GetStringListFunc(query, bindings, rebindings) -} - -// GetStringListCalls gets all the calls that were made to GetStringList. -// Check the length with: -// len(mockedNeptunePool.GetStringListCalls()) -func (mock *NeptunePoolMock) GetStringListCalls() []struct { - Query string - Bindings map[string]string - Rebindings map[string]string -} { - var calls []struct { - Query string - Bindings map[string]string - Rebindings map[string]string - } - lockNeptunePoolMockGetStringList.RLock() - calls = mock.calls.GetStringList - lockNeptunePoolMockGetStringList.RUnlock() - return calls -} - -// OpenStreamCursor calls OpenStreamCursorFunc. 
-func (mock *NeptunePoolMock) OpenStreamCursor(ctx context.Context, query string, bindings map[string]string, rebindings map[string]string) (*gremgo.Stream, error) { - if mock.OpenStreamCursorFunc == nil { - panic("NeptunePoolMock.OpenStreamCursorFunc: method is nil but NeptunePool.OpenStreamCursor was just called") - } - callInfo := struct { - Ctx context.Context - Query string - Bindings map[string]string - Rebindings map[string]string - }{ - Ctx: ctx, - Query: query, - Bindings: bindings, - Rebindings: rebindings, - } - lockNeptunePoolMockOpenStreamCursor.Lock() - mock.calls.OpenStreamCursor = append(mock.calls.OpenStreamCursor, callInfo) - lockNeptunePoolMockOpenStreamCursor.Unlock() - return mock.OpenStreamCursorFunc(ctx, query, bindings, rebindings) -} - -// OpenStreamCursorCalls gets all the calls that were made to OpenStreamCursor. -// Check the length with: -// len(mockedNeptunePool.OpenStreamCursorCalls()) -func (mock *NeptunePoolMock) OpenStreamCursorCalls() []struct { - Ctx context.Context - Query string - Bindings map[string]string - Rebindings map[string]string -} { - var calls []struct { - Ctx context.Context - Query string - Bindings map[string]string - Rebindings map[string]string - } - lockNeptunePoolMockOpenStreamCursor.RLock() - calls = mock.calls.OpenStreamCursor - lockNeptunePoolMockOpenStreamCursor.RUnlock() - return calls -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go b/vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go deleted file mode 100644 index c47c8a50..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/mapper.go +++ /dev/null @@ -1,132 +0,0 @@ -package neptune - -/* -This module is dedicated to the needs of the hierarchy API. 
-*/ - -import ( - "fmt" - - "github.com/ONSdigital/dp-graph/neptune/query" - "github.com/ONSdigital/dp-hierarchy-api/models" - "github.com/ONSdigital/go-ns/log" - "github.com/ONSdigital/graphson" -) - -func (n *NeptuneDB) buildHierarchyNodeFromGraphsonVertex(v graphson.Vertex, instanceID, dimension string, wantBreadcrumbs bool) (res *models.Response, err error) { - logData := log.Data{"fn": "buildHierarchyNodeFromGraphsonVertex"} - - res = &models.Response{} - // Note we are using the vertex' *code* property for the response model's - // ID field - because in the case of a hierarchy node, this is the ID - // used to format links. - if res.ID, err = v.GetProperty("code"); err != nil { - log.ErrorC("bad GetProp code", err, logData) - return - } - - if res.Label, err = v.GetLabel(); err != nil { - log.ErrorC("bad label", err, logData) - return - } - if res.NoOfChildren, err = v.GetPropertyInt64("numberOfChildren"); err != nil { - log.ErrorC("bad numberOfChildren", err, logData) - return - } - if res.HasData, err = v.GetPropertyBool("hasData"); err != nil { - log.ErrorC("bad hasData", err, logData) - return - } - // Fetch new data from the database concerned with the node's children. 
- if res.NoOfChildren > 0 && instanceID != "" { - var code string - if code, err = v.GetProperty("code"); err != nil { - log.ErrorC("bad GetProp code", err, logData) - return - } - - gremStmt := fmt.Sprintf(query.GetChildren, instanceID, dimension, code) - logData["statement"] = gremStmt - - var childVertices []graphson.Vertex - if childVertices, err = n.getVertices(gremStmt); err != nil { - log.ErrorC("get", err, logData) - return - } - if int64(len(childVertices)) != res.NoOfChildren { - logData["num_children_prop"] = res.NoOfChildren - logData["num_children_get"] = len(childVertices) - logData["node_id"] = res.ID - log.Info("child count mismatch", logData) - } - var childElement *models.Element - for _, child := range childVertices { - if childElement, err = convertVertexToElement(child); err != nil { - log.ErrorC("converting child", err, logData) - return - } - res.Children = append(res.Children, childElement) - } - } - // Fetch new data from the database concerned with the node's breadcrumbs. - if wantBreadcrumbs { - res.Breadcrumbs, err = n.buildBreadcrumbs(instanceID, dimension, res.ID) - if err != nil { - log.ErrorC("building breadcrumbs", err, logData) - } - } - return -} - -/* -buildBreadcrumbs launches a new query to the database, to trace the (recursive) -parentage of a hierarcy node. It converts the returned chain of parent -graphson vertices into a chain of models.Element, and returns this list of -elements. 
-*/ -func (n *NeptuneDB) buildBreadcrumbs(instanceID, dimension, code string) ([]*models.Element, error) { - logData := log.Data{"fn": "buildBreadcrumbs"} - gremStmt := fmt.Sprintf(query.GetAncestry, instanceID, dimension, code) - logData["statement"] = gremStmt - ancestorVertices, err := n.getVertices(gremStmt) - if err != nil { - log.ErrorC("getVertices", err, logData) - return nil, err - } - elements := []*models.Element{} - for _, ancestor := range ancestorVertices { - element, err := convertVertexToElement(ancestor) - if err != nil { - log.ErrorC("convertVertexToElement", err, logData) - return nil, err - } - elements = append(elements, element) - } - return elements, nil -} - -func convertVertexToElement(v graphson.Vertex) (res *models.Element, err error) { - logData := log.Data{"fn": "convertVertexToElement"} - res = &models.Element{} - // Note we are using the vertex' *code* property for the response model's - // ID field - because in the case of a hierarchy node, this is the ID - // used to format links. 
- if res.ID, err = v.GetProperty("code"); err != nil { - log.ErrorC("bad GetProp code", err, logData) - return - } - - if res.Label, err = v.GetLabel(); err != nil { - log.ErrorC("bad label", err, logData) - return - } - if res.NoOfChildren, err = v.GetPropertyInt64("numberOfChildren"); err != nil { - log.ErrorC("bad numberOfChildren", err, logData) - return - } - if res.HasData, err = v.GetPropertyBool("hasData"); err != nil { - log.ErrorC("bad hasData", err, logData) - return - } - return -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/mockedneptune.go b/vendor/github.com/ONSdigital/dp-graph/neptune/mockedneptune.go deleted file mode 100644 index e48df189..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/mockedneptune.go +++ /dev/null @@ -1,24 +0,0 @@ -package neptune - -import ( - "github.com/ONSdigital/dp-graph/neptune/internal" -) - -/* -This module provides the MockDB factory function to make a NeptuneDB into -which a mocked implementation of the gremgo driver's Pool may be injected -to avoid real database access. -*/ - -import ( - "github.com/ONSdigital/dp-graph/neptune/driver" -) - -// mockDB provides a NeptuneDB, into which you can pass a mocked -// NeptunePoolMock implementation, and thus write tests that bypass real -// database communication. 
-func mockDB(poolMock *internal.NeptunePoolMock) *NeptuneDB { - driver := driver.NeptuneDriver{Pool: poolMock} - db := &NeptuneDB{driver, 5, 30} - return db -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go b/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go deleted file mode 100644 index d02c084b..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/neptune.go +++ /dev/null @@ -1,258 +0,0 @@ -package neptune - -import ( - "context" - "errors" - "fmt" - "math" - "math/rand" - "strings" - "time" - - "github.com/ONSdigital/dp-graph/neptune/driver" - "github.com/ONSdigital/go-ns/log" - "github.com/ONSdigital/graphson" - gremgo "github.com/ONSdigital/gremgo-neptune" -) - -type NeptuneDB struct { - driver.NeptuneDriver - - maxAttempts int - timeout int -} - -func New(dbAddr string, size, timeout, retries int, errs chan error) (n *NeptuneDB, err error) { - // set defaults if not provided - if size == 0 { - size = 30 - } - if timeout == 0 { - timeout = 30 - } - if retries == 0 { - retries = 5 - } - - var d *driver.NeptuneDriver - if d, err = driver.New(context.Background(), dbAddr, errs); err != nil { - return - } - - // seed for sleepy() below - rand.Seed(time.Now().Unix()) - - n = &NeptuneDB{ - *d, - 1 + retries, - timeout, - } - return -} - -func (n *NeptuneDB) getVertices(gremStmt string) (vertices []graphson.Vertex, err error) { - logData := log.Data{"fn": "getVertices", "statement": gremStmt, "attempt": 1} - - var res interface{} - for attempt := 1; attempt < n.maxAttempts; attempt++ { - if attempt > 1 { - log.ErrorC("will retry", err, logData) - sleepy(attempt, 20*time.Millisecond) - logData["attempt"] = attempt - } - res, err = n.Pool.Get(gremStmt, nil, nil) - if err == nil { - var ok bool - if vertices, ok = res.([]graphson.Vertex); !ok { - err = errors.New("cannot cast Get results to []Vertex") - log.ErrorC("cast", err, logData) - return - } - // success - return - } - 
// XXX check err for non-retriable errors - if !isTransientError(err) { - return - } - } - // ASSERT: failed all attempts - log.ErrorC("maxAttempts reached", err, logData) - err = ErrAttemptsExceededLimit{err} - return - return -} - -func (n *NeptuneDB) getStringList(gremStmt string) (strings []string, err error) { - logData := log.Data{"fn": "getStringList", "statement": gremStmt, "attempt": 1} - - for attempt := 1; attempt < n.maxAttempts; attempt++ { - if attempt > 1 { - log.ErrorC("will retry", err, logData) - sleepy(attempt, 20*time.Millisecond) - logData["attempt"] = attempt - } - strings, err = n.Pool.GetStringList(gremStmt, nil, nil) - if err == nil { - return - } - // XXX check err for non-retriable errors - if !isTransientError(err) { - return - } - } - // ASSERT: failed all attempts - log.ErrorC("maxAttempts reached", err, logData) - err = ErrAttemptsExceededLimit{err} - return -} - -func (n *NeptuneDB) getVertex(gremStmt string) (vertex graphson.Vertex, err error) { - logData := log.Data{"fn": "getVertex", "statement": gremStmt} - - var vertices []graphson.Vertex - if vertices, err = n.getVertices(gremStmt); err != nil { - log.ErrorC("get", err, logData) - return - } - if len(vertices) != 1 { - err = errors.New("expected one vertex") - log.ErrorC("not one", err, logData) - return - } - return vertices[0], nil -} - -func (n *NeptuneDB) getEdges(gremStmt string) (edges []graphson.Edge, err error) { - logData := log.Data{"fn": "getEdges", "statement": gremStmt, "attempt": 1} - - var res interface{} - for attempt := 1; attempt < n.maxAttempts; attempt++ { - if attempt > 1 { - log.ErrorC("will retry", err, logData) - sleepy(attempt, 20*time.Millisecond) - logData["attempt"] = attempt - } - res, err = n.Pool.GetE(gremStmt, nil, nil) - if err == nil { - // success - var ok bool - if edges, ok = res.([]graphson.Edge); !ok { - err = errors.New("cannot cast GetE results to []Edge") - log.ErrorC("cast", err, logData) - return - } - // return re-cast success - 
return - } - // XXX check err for non-retriable errors - if !isTransientError(err) { - return - } - } - // ASSERT: failed all attempts - log.ErrorC("maxAttempts reached", err, logData) - err = ErrAttemptsExceededLimit{err} - return -} - -func (n *NeptuneDB) exec(gremStmt string) (res []gremgo.Response, err error) { - logData := log.Data{"fn": "n.exec", "statement": gremStmt, "attempt": 1} - - for attempt := 1; attempt < n.maxAttempts; attempt++ { - if attempt > 1 { - log.ErrorC("will retry", err, logData) - sleepy(attempt, 20*time.Millisecond) - logData["attempt"] = attempt - } - if res, err = n.Pool.Execute(gremStmt, nil, nil); err == nil { - // success - if res == nil { - err = errors.New("res returned nil") - log.ErrorC("bad res", err, logData) - return - } - logData["exec_res"] = res - log.Info("exec ok", logData) - return - } - // XXX check err more thoroughly (isTransientError?) (non-err failures?) - if !isTransientError(err) { - return - } - } - // ASSERT: failed all attempts - log.ErrorC("maxAttempts reached", err, logData) - err = ErrAttemptsExceededLimit{err} - return -} - -func (n *NeptuneDB) getNumber(gremStmt string) (count int64, err error) { - logData := log.Data{"fn": "n.getNumber", "statement": gremStmt, "attempt": 1} - - for attempt := 1; attempt < n.maxAttempts; attempt++ { - if attempt > 1 { - log.ErrorC("will retry", err, logData) - sleepy(attempt, 20*time.Millisecond) - logData["attempt"] = attempt - } - if count, err = n.Pool.GetCount(gremStmt, nil, nil); err == nil { - // success, so return number - return - } - // XXX check non-nil err more thoroughly (isTransientError?) 
- if !isTransientError(err) { - return - } - } - // ASSERT: failed all attempts - log.ErrorC("maxAttempts reached", err, logData) - err = ErrAttemptsExceededLimit{err} - return -} - -// ErrAttemptsExceededLimit is returned when the number of attempts has reached -// the maximum permitted -type ErrAttemptsExceededLimit struct { - WrappedErr error -} - -func (e ErrAttemptsExceededLimit) Error() string { - return fmt.Sprintf("number of attempts to execute statement exceeded: %s", e.WrappedErr.Error()) -} - -/* -func (n *Neptune) checkAttempts(err error, instanceID string, attempt int) error { - if !isTransientError(err) { - log.Info("received an error from neptune that cannot be retried", - log.Data{"instance_id": instanceID, "error": err}) - - return err - } - - time.Sleep(getSleepTime(attempt, 20*time.Millisecond)) - - if attempt >= n.maxRetries { - return ErrAttemptsExceededLimit{err} - } - - return nil -} -*/ -func isTransientError(err error) bool { - if strings.Contains(err.Error(), " MALFORMED REQUEST ") || - strings.Contains(err.Error(), " INVALID REQUEST ARGUMENTS ") { - return false - } - return true -} - -// sleepy sleeps for a time which increases, based on the attempt and initial retry time. 
-// It uses the algorithm 2^n where n is the attempt number (double the previous) and -// a randomization factor of between 0-5ms so that the server isn't being hit constantly -// at the same time by many clients -func sleepy(attempt int, retryTime time.Duration) { - n := (math.Pow(2, float64(attempt))) - rnd := time.Duration(rand.Intn(4)+1) * time.Millisecond - time.Sleep((time.Duration(n) * retryTime) - rnd) -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go b/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go deleted file mode 100644 index ffef0caf..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/observation.go +++ /dev/null @@ -1,111 +0,0 @@ -package neptune - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/pkg/errors" - - "github.com/ONSdigital/dp-graph/neptune/query" - "github.com/ONSdigital/dp-graph/observation" - "github.com/ONSdigital/dp-observation-importer/models" - "github.com/ONSdigital/go-ns/log" -) - -// ErrInvalidFilter is returned if the provided filter is nil. 
-var ErrInvalidFilter = errors.New("nil filter cannot be processed") - -// TODO: this global state is only used for metrics in InsertObservationBatch, -// not used in actual code flow, but should be revisited before production use -var batchCount = 0 -var totalTime time.Time - -func (n *NeptuneDB) StreamCSVRows(ctx context.Context, filter *observation.Filter, limit *int) (observation.StreamRowReader, error) { - if filter == nil { - return nil, ErrInvalidFilter - } - - q := fmt.Sprintf(query.GetInstanceHeaderPart, filter.InstanceID) - - q += buildObservationsQuery(filter) - q += query.GetObservationSelectRowPart - - if limit != nil { - q += fmt.Sprintf(query.LimitPart, *limit) - } - - return n.Pool.OpenStreamCursor(ctx, q, nil, nil) -} - -func buildObservationsQuery(f *observation.Filter) string { - if f.IsEmpty() { - return fmt.Sprintf(query.GetAllObservationsPart, f.InstanceID) - } - - q := fmt.Sprintf(query.GetObservationsPart, f.InstanceID) - var selectOpts []string - - for _, dim := range f.DimensionFilters { - if len(dim.Options) == 0 { - continue - } - - for i, opt := range dim.Options { - dim.Options[i] = fmt.Sprintf("'%s'", opt) - } - - selectOpts = append(selectOpts, fmt.Sprintf(query.GetObservationDimensionPart, f.InstanceID, dim.Name, strings.Join(dim.Options, ","))) - } - - //comma separate dimension option selections and close match statement - q += strings.Join(selectOpts, ",") - q += ")" - - return q -} - -func (n *NeptuneDB) InsertObservationBatch(ctx context.Context, attempt int, instanceID string, observations []*models.Observation, dimensionNodeIDs map[string]string) error { - if len(observations) == 0 { - log.Info("no observations in batch", log.Data{"instance_ID": instanceID}) - return nil - } - - bID := batchCount - batchCount++ - batchStart := time.Now() - if totalTime.IsZero() { - totalTime = batchStart - } else { - log.Info("opening batch", log.Data{"size": len(observations), "batchID": bID}) - } - - var create string - for _, o := range 
observations { - create += fmt.Sprintf(query.DropObservationRelationships, instanceID, o.Row) - create += fmt.Sprintf(query.DropObservation, instanceID, o.Row) - create += fmt.Sprintf(query.CreateObservationPart, instanceID, o.Row, o.RowIndex) - for _, d := range o.DimensionOptions { - dimensionName := strings.ToLower(d.DimensionName) - dimensionLookup := instanceID + "_" + dimensionName + "_" + d.Name - - nodeID, ok := dimensionNodeIDs[dimensionLookup] - if !ok { - return fmt.Errorf("no nodeID [%s] found in dimension map", dimensionLookup) - } - - create += fmt.Sprintf(query.AddObservationRelationshipPart, nodeID, instanceID, d.DimensionName, d.Name) - } - - create = strings.TrimSuffix(create, ".outV()") - create += ".iterate() " - } - - if _, err := n.exec(create); err != nil { - return err - } - - log.Info("batch complete", log.Data{"batchID": bID, "elapsed": time.Since(totalTime), "batchTime": time.Since(batchStart)}) - return nil -} diff --git a/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go b/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go deleted file mode 100644 index 789ffe1e..00000000 --- a/vendor/github.com/ONSdigital/dp-graph/neptune/query/query.go +++ /dev/null @@ -1,109 +0,0 @@ -package query - -const ( - // codelists - GetCodeLists = "g.V().hasLabel('_code_list')" - GetCodeListsFiltered = "g.V().hasLabel('_code_list').has('%s', true)" - GetCodeList = "g.V().hasLabel('_code_list').has('listID', '%s')" - CodeListExists = "g.V().hasLabel('_code_list').has('listID', '%s').count()" - CodeListEditionExists = "g.V().hasLabel('_code_list').has('listID', '%s').has('edition', '%s').count()" - GetCodes = "g.V().hasLabel('_code_list')" + - ".has('listID', '%s').has('edition', '%s')" + - ".in('usedBy').hasLabel('_code')" - CodeExists = "g.V().hasLabel('_code_list')" + - ".has('listID', '%s').has('edition', '%s')" + - ".in('usedBy').has('value', '%s').count()" - - /* - This query harvests data from both edges and 
nodes, so we collapse - the response to contain only strings - to make it parse-able with - the graphson string-list method. - - %s Parameters: codeListID, codeListEdition, codeValue - - Naming: - - r: usedBy relation - rl: usedBy.label - c: code node - d: dataset - de: dataset.edition - dv: dataset.version - */ - GetCodeDatasets = `g.V().hasLabel('_code_list').has('listID', '%s'). - has('edition','%s'). - inE('usedBy').as('r').values('label').as('rl').select('r'). - match( - __.as('r').outV().has('value','%s').as('c'), - __.as('c').out('inDataset').as('d'). - select('d').values('edition').as('de'). - select('d').values('version').as('dv'), - select('d').values('dataset_id').as('did'). - __.as('d').has('is_published',true)). - union(select('rl', 'de', 'dv', 'did')).unfold().select(values) - ` - - // hierarchy write - CloneHierarchyNodes = "g.V().hasLabel('_generic_hierarchy_node_%s').as('old')" + - ".addV('_hierarchy_node_%s_%s')" + - ".property('code',select('old').values('code'))" + - ".property('label',select('old').values('label'))" + - ".property(single, 'hasData', false)" + - ".property('code_list','%s').as('new')" + - ".addE('clone_of').to('old').select('new')" - CountHierarchyNodes = "g.V().hasLabel('_hierarchy_node_%s_%s').count()" - CloneHierarchyRelationships = "g.V().hasLabel('_generic_hierarchy_node_%s').as('oc')" + - ".out('hasParent')" + - ".in('clone_of').hasLabel('_hierarchy_node_%s_%s')" + - ".addE('hasParent').from(select('oc').in('clone_of').hasLabel('_hierarchy_node_%s_%s'))" - RemoveCloneMarkers = "g.V().hasLabel('_hierarchy_node_%s_%s').outE('clone_of').drop()" - SetNumberOfChildren = "g.V().hasLabel('_hierarchy_node_%s_%s').property(single,'numberOfChildren',__.in('hasParent').count())" - SetHasData = "g.V().hasLabel('_hierarchy_node_%s_%s').as('v')" + - `.V().hasLabel('_%s_%s').as('c').where('v',eq('c')).by('code').by('value').` + - `select('v').property('hasData',true)` - MarkNodesToRemain = 
"g.V().hasLabel('_hierarchy_node_%s_%s').has('hasData').property('remain',true)" + - ".repeat(out('hasParent')).emit().property('remain',true)" - RemoveNodesNotMarkedToRemain = "g.V().hasLabel('_hierarchy_node_%s_%s').not(has('remain',true)).drop()" - RemoveRemainMarker = "g.V().hasLabel('_hierarchy_node_%s_%s').has('remain').properties('remain').drop()" - - // hierarchy read - HierarchyExists = "g.V().hasLabel('_hierarchy_node_%s_%s').limit(1)" - GetHierarchyRoot = "g.V().hasLabel('_hierarchy_node_%s_%s').not(outE('hasParent'))" - GetHierarchyElement = "g.V().hasLabel('_hierarchy_node_%s_%s').has('code','%s')" - GetChildren = "g.V().hasLabel('_hierarchy_node_%s_%s').has('code','%s').in('hasParent').order().by('label')" - // Note this query is recursive - GetAncestry = "g.V().hasLabel('_hierarchy_node_%s_%s').has('code', '%s').repeat(out('hasParent')).emit()" - - // instance - import process - CreateInstance = "g.addV('_%s_Instance').property(single,'header','%s')" - CheckInstance = "g.V().hasLabel('_%s_Instance').count()" - CreateInstanceToCodeRelationship = "g.V().hasLabel('_%s_Instance').as('i').addE('inDataset').from(" + - "V().hasLabel('_code').has('value','%s').where(out('usedBy').hasLabel('_code_list').has('listID','%s'))" + - ")" - AddVersionDetailsToInstance = "g.V().hasLabel('_%s_Instance').property(single,'dataset_id','%s')." 
+ - "property(single,'edition','%s').property(single,'version','%s')" - SetInstanceIsPublished = "g.V().hasLabel('_%s_Instance').property(single,'is_published',true)" - CountObservations = "g.V().hasLabel('_%s_observation').count()" - - //instance - parts - AddInstanceDimensionsPart = "g.V().hasLabel('_%s_Instance')" - AddInstanceDimensionsPropertyPart = ".property(list, 'dimensions', '%s')" - - // dimension - CreateDimensionToInstanceRelationship = "g.V().hasLabel('_%s_%s').has('value', '%s').fold().coalesce(unfold(), " + - "addV('_%s_%s').as('d').property('value','%s').addE('HAS_DIMENSION').from(V().hasLabel('_%s_Instance')).select('d'))" - - // observation - DropObservationRelationships = "g.V().hasLabel('_%s_observation').has('value', '%s').bothE().drop().iterate()" - DropObservation = "g.V().hasLabel('_%s_observation').has('value', '%s').drop().iterate()" - CreateObservationPart = "g.addV('_%s_observation').property(single, 'value', '%s').property(single, 'rowIndex', '%d')" - AddObservationRelationshipPart = ".addE('isValueOf').to(V().hasId('%s').hasLabel('_%s_%s').where(values('value').is('%s'))).outV()" - - GetInstanceHeaderPart = "g.V().hasLabel('_%s_Instance').as('instance')" - GetAllObservationsPart = ".V().hasLabel('_%s_observation').values('row')" - - GetObservationsPart = ".V().hasLabel('_%s_observation').match(" - GetObservationDimensionPart = "__.as('row').out('isValueOf').hasLabel('_%s_%s').where(values('value').is(within(%s)))" - GetObservationSelectRowPart = ".select('instance', 'row').by('header').by('row').unfold().dedup().select(values)" - LimitPart = ".limit(%d)" -) diff --git a/vendor/github.com/ONSdigital/graphson/deserialize.go b/vendor/github.com/ONSdigital/graphson/deserialize.go deleted file mode 100644 index 33e13ca9..00000000 --- a/vendor/github.com/ONSdigital/graphson/deserialize.go +++ /dev/null @@ -1,249 +0,0 @@ -package graphson - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" -) - -// 
DeserializeVertices converts a graphson string to a slice of Vertex -func DeserializeVertices(rawResponse string) ([]Vertex, error) { - // TODO: empty strings for property values will cause invalid json - // make so it can handle that case - if len(rawResponse) == 0 { - return []Vertex{}, nil - } - return DeserializeVerticesFromBytes([]byte(rawResponse)) -} - -// DeserializeVerticesFromBytes returns a slice of Vertex from the graphson rawResponse list of vertex -func DeserializeVerticesFromBytes(rawResponse []byte) ([]Vertex, error) { - // TODO: empty strings for property values will cause invalid json - // make so it can handle that case - var response []Vertex - if len(rawResponse) == 0 { - return response, nil - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err := dec.Decode(&response); err != nil { - return nil, err - } - return response, nil -} - -// DeserializeListOfVerticesFromBytes returns a slice of Vertex from the graphson rawResponse g:List of vertex -func DeserializeListOfVerticesFromBytes(rawResponse []byte) ([]Vertex, error) { - var metaResponse ListVertices - var response []Vertex - if len(rawResponse) == 0 { - return response, nil - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err := dec.Decode(&metaResponse); err != nil { - return nil, err - } - - if metaResponse.Type != "g:List" { - return response, fmt.Errorf("DeserializeListOfVerticesFromBytes: Expected `g:List` type, but got %q", metaResponse.Type) - } - - return metaResponse.Value, nil -} - -func DeserializeListOfEdgesFromBytes(rawResponse []byte) (Edges, error) { - var metaResponse ListEdges - var response Edges - if len(rawResponse) == 0 { - return response, nil - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - err := dec.Decode(&metaResponse) - if err != nil { - return nil, err - } - - if metaResponse.Type != "g:List" { - return response, 
fmt.Errorf("DeserializeListOfEdgesFromBytes: Expected `g:List` type, but got %q", metaResponse.Type) - } - - return metaResponse.Value, nil -} - -func DeserializeMapFromBytes(rawResponse []byte) (resMap map[string]interface{}, err error) { - var metaResponse GList - if len(rawResponse) == 0 { - return - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err = dec.Decode(&metaResponse); err != nil { - return nil, err - } - - if metaResponse.Type != "g:Map" { - return resMap, fmt.Errorf("DeserializeMapFromBytes: Expected `g:Map` type, but got %q", metaResponse.Type) - } - - return resMap, nil -} - -// DeserializePropertiesFromBytes is for converting vertex .properties() results into a map -func DeserializePropertiesFromBytes(rawResponse []byte, resMap map[string][]interface{}) (err error) { - var metaResponse GList - if len(rawResponse) == 0 { - return - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err = dec.Decode(&metaResponse); err != nil { - return - } - - if metaResponse.Type != "g:List" { - return fmt.Errorf("DeserializePropertiesFromBytes: Expected `g:List` type, but got %q", metaResponse.Type) - } - var props []VertexProperty - if err = json.Unmarshal(metaResponse.Value, &props); err != nil { - return - } - - for _, prop := range props { - if _, ok := resMap[prop.Value.Label]; !ok { - resMap[prop.Value.Label] = []interface{}{prop.Value.Value} - } else { - resMap[prop.Value.Label] = append(resMap[prop.Value.Label], prop.Value.Value) - } - } - - return -} - -// DeserializeStringListFromBytes get a g:List value which should be a a list of strings, return those -func DeserializeStringListFromBytes(rawResponse []byte) (vals []string, err error) { - var metaResponse GList - if len(rawResponse) == 0 { - err = errors.New("DeserializeStringListFromBytes: nothing to decode") - return - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err = 
dec.Decode(&metaResponse); err != nil { - return - } - - if metaResponse.Type != "g:List" { - err = fmt.Errorf("DeserializeStringListFromBytes: Expected `g:List` type, but got %q", metaResponse.Type) - return - } - - if err = json.Unmarshal(metaResponse.Value, &vals); err != nil { - return - } - return -} - -// DeserializeSingleFromBytes get a g:List value which should be a singular item, returns that item -func DeserializeSingleFromBytes(rawResponse []byte) (gV GenericValue, err error) { - var metaResponse GList - if len(rawResponse) == 0 { - err = errors.New("DeserializeSingleFromBytes: nothing to decode") - return - } - dec := json.NewDecoder(bytes.NewReader(rawResponse)) - dec.DisallowUnknownFields() - if err = dec.Decode(&metaResponse); err != nil { - return - } - - if metaResponse.Type != "g:List" { - err = fmt.Errorf("DeserializeSingleFromBytes: Expected `g:List` type, but got %q", metaResponse.Type) - return - } - - var genVals GenericValues - if genVals, err = DeserializeGenericValues(string(metaResponse.Value)); err != nil { - return - } - - if len(genVals) != 1 { - err = fmt.Errorf("DeserializeSingleFromBytes: Expected single value, got %d", len(genVals)) - return - } - - return genVals[0], nil -} - -// DeserializeNumber returns the count from the g:List'd database response -func DeserializeNumber(rawResponse []byte) (count int64, err error) { - var genVal GenericValue - if genVal, err = DeserializeSingleFromBytes(rawResponse); err != nil { - return - } - - if genVal.Type != "g:Int64" { - err = fmt.Errorf("DeserializeNumber: Expected `g:Int64` type, but got %q", genVal.Type) - return - } - count = int64(genVal.Value.(float64)) - return -} - -func DeserializeEdges(rawResponse string) (Edges, error) { - var response Edges - if rawResponse == "" { - return response, nil - } - err := json.Unmarshal([]byte(rawResponse), &response) - if err != nil { - return nil, err - } - return response, nil -} - -func DeserializeGenericValue(rawResponse string) (response 
GenericValue, err error) { - if len(rawResponse) == 0 { - return - } - if err = json.Unmarshal([]byte(rawResponse), &response); err != nil { - return - } - return -} - -func DeserializeGenericValues(rawResponse string) (GenericValues, error) { - var response GenericValues - if rawResponse == "" { - return response, nil - } - err := json.Unmarshal([]byte(rawResponse), &response) - if err != nil { - return nil, err - } - return response, nil -} - -func ConvertToCleanVertices(vertices []Vertex) []CleanVertex { - var responseVertices []CleanVertex - for _, vertex := range vertices { - responseVertices = append(responseVertices, CleanVertex{ - Id: vertex.Value.ID, - Label: vertex.Value.Label, - }) - } - return responseVertices -} - -func ConvertToCleanEdges(edges Edges) []CleanEdge { - var responseEdges []CleanEdge - for _, edge := range edges { - responseEdges = append(responseEdges, CleanEdge{ - Source: edge.Value.InV, - Target: edge.Value.OutV, - }) - } - return responseEdges -} diff --git a/vendor/github.com/ONSdigital/graphson/types.go b/vendor/github.com/ONSdigital/graphson/types.go deleted file mode 100644 index 028027eb..00000000 --- a/vendor/github.com/ONSdigital/graphson/types.go +++ /dev/null @@ -1,153 +0,0 @@ -package graphson - -import "encoding/json" - -// cbi made up, not a real graphson or gremlin thing -// type GremlinResponse struct { -// V Vertices -// E Edges -// } - -type GList struct { - Type string `json:"@type"` - Value json.RawMessage `json:"@value"` -} - -// type GMap struct { -// Type string `json:"@type"` -// Value json.RawMessage `json:"@value"` -// } - -type ListVertices struct { - Type string `json:"@type"` - Value []Vertex `json:"@value"` -} -type ListEdges struct { - Type string `json:"@type"` - Value Edges `json:"@value"` -} - -// type Vertices []Vertex - -type Vertex struct { - Type string `json:"@type"` - Value VertexValue `json:"@value"` -} - -type VertexValue struct { - ID string `json:"id"` - Label string 
`json:"label"` - Properties map[string][]VertexProperty `json:"properties"` -} - -type VertexProperty struct { - Type string `json:"@type"` - Value VertexPropertyValue `json:"@value"` -} - -type EdgeProperty struct { - Type string `json:"@type"` - Value EdgePropertyValue `json:"@value"` -} - -type VertexPropertyValue struct { - ID GenericValue `json:"id"` - Label string `json:"label"` - Value interface{} `json:"value"` -} - -type EdgePropertyValue struct { - Label string `json:"key"` - // Value GenericValue `json:"value"` // this works when value is NOT a string - Value json.RawMessage `json:"value"` - // ValueStr string `json:"value"` - // Value interface{} `json:"value"` -} - -type GenericValues []GenericValue - -type GenericValue struct { - Type string `json:"@type"` - Value interface{} `json:"@value"` -} - -type Edges []Edge - -type Edge struct { - Type string `json:"@type"` - Value EdgeValue `json:"@value"` -} - -type EdgeValue struct { - ID string `json:"id"` - Label string `json:"label"` - InVLabel string `json:"inVLabel"` - OutVLabel string `json:"outVLabel"` - InV string `json:"inV"` - OutV string `json:"outV"` - Properties map[string]EdgeProperty `json:"properties"` -} - -// type CleanResponse struct { -// V []CleanVertex -// E []CleanEdge -// } - -type CleanEdge struct { - Source string `json:"source"` - Target string `json:"target"` -} - -type CleanVertex struct { - Id string `json:"id"` - Label string `json:"label"` -} - -// type MinVertex struct { -// ID string -// Label string -// Props map[string][]MinVertexProp -// } -// type MinVertexProp struct { -// // ID string -// Label string -// Value interface{} -// } - -// type UpsertVertexMap struct { -// Id string `json:""` -// Label string `json:"label"` -// } - -// type TypeID int - -// const ( -// TypeString TypeID = iota -// TypeBoolean -// TypeMap -// TypeCollection -// TypeClass -// TypeDate -// TypeDouble -// TypeFloat -// TypeInteger -// TypeLong -// TypeTimestamp -// TypeUUID -// TypeVertex -// 
TypeVertexProperty -// ) - -// const ( -// TypeStrDate = "g:Date" -// TypeStrDouble = "g:Double" -// TypeStrFloat = "g:Float" -// TypeStrInteger = "g:Int32" -// TypeStrLong = "g:Int64" -// TypeStrTimestamp = "g:Timestamp" -// TypeStrUUID = "g:UUID" -// TypeStrVertex = "g:Vertex" -// TypeStrVertexProperty = "g:VertexProperty" -// TypeStrProperty = "g:Property" -// TypeStrEdge = "g:Edge" -// ) diff --git a/vendor/github.com/ONSdigital/graphson/utils.go b/vendor/github.com/ONSdigital/graphson/utils.go deleted file mode 100644 index 7f55deed..00000000 --- a/vendor/github.com/ONSdigital/graphson/utils.go +++ /dev/null @@ -1,246 +0,0 @@ -package graphson - -import ( - "errors" - "strings" -) - -var ( - ErrorPropertyNotFound = errors.New("property not found") - ErrorPropertyIsMeta = errors.New("meta-property found where multi-property expected") - ErrorPropertyIsMulti = errors.New("multi-property found where singleton expected") - ErrorUnexpectedPropertyType = errors.New("property value could not be cast into expected type") -) - -// GetID returns the string ID for the given vertex -func (v Vertex) GetID() string { - return v.Value.ID -} - -// GetLabels returns the []string labels for the given vertex -func (v Vertex) GetLabels() (labels []string) { - labels = append(labels, v.Value.Label) - if strings.Index(labels[0], "::") == -1 { - return - } - return strings.Split(labels[0], "::") -} - -// GetLabel returns the string label for the given vertex, or an error if >1 -func (v Vertex) GetLabel() (string, error) { - labels := v.GetLabels() - if len(labels) > 1 { - return "", errors.New("too many labels - expected one") - } - return labels[0], nil -} - -// GetMultiProperty returns the ([]string) values for the given property `key` -// will return an error if the property is not the correct type -func (v Vertex) GetMultiProperty(key string) (vals []string, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, 
"string"); err != nil { - return - } - for _, val := range valsInterface { - vals = append(vals, val.(string)) - } - return -} - -// GetMultiPropertyBool returns the ([]bool) values for the given property `key` -// will return an error if the property is not the correct type -func (v Vertex) GetMultiPropertyBool(key string) (vals []bool, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "bool"); err != nil { - return - } - for _, val := range valsInterface { - vals = append(vals, val.(bool)) - } - return -} - -// GetMultiPropertyInt64 returns the ([]int64) values for the given property `key` -// will return an error if the property is not the correct type -func (v Vertex) GetMultiPropertyInt64(key string) (vals []int64, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "int64"); err != nil { - return - } - for _, val := range valsInterface { - vals = append(vals, val.(int64)) - } - return -} - -// GetMultiPropertyInt32 returns the ([]int32) values for the given property `key` -// will return an error if the property is not the correct type -func (v Vertex) GetMultiPropertyInt32(key string) (vals []int32, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "int32"); err != nil { - return - } - for _, val := range valsInterface { - vals = append(vals, val.(int32)) - } - return -} - -// GetMultiPropertyAs returns the values for the given property `key` as type `wantType` -// will return an error if the property is not a set of the given `wantType` (string, bool, int64) -func (v Vertex) GetMultiPropertyAs(key, wantType string) (vals []interface{}, err error) { - var valInterface []VertexProperty - var ok bool - if valInterface, ok = v.Value.Properties[key]; !ok { - err = ErrorPropertyNotFound - return - } - for _, prop := range valInterface { - if prop.Value.Label != key { - err = ErrorPropertyIsMulti - return - } - switch 
wantType { - - case "string": - var val string - if val, ok = prop.Value.Value.(string); !ok { - err = ErrorUnexpectedPropertyType - return - } - vals = append(vals, val) - case "bool": - var val bool - if val, ok = prop.Value.Value.(bool); !ok { - err = ErrorUnexpectedPropertyType - return - } - vals = append(vals, val) - case "int32": - var typeIf, valIf interface{} - if typeIf, ok = prop.Value.Value.(map[string]interface{})["@type"]; !ok || typeIf != "g:Int32" { - return vals, ErrorUnexpectedPropertyType - } - if valIf, ok = prop.Value.Value.(map[string]interface{})["@value"]; !ok { - return vals, ErrorUnexpectedPropertyType - } - var val float64 - if val, ok = valIf.(float64); !ok { - return vals, ErrorUnexpectedPropertyType - } - vals = append(vals, int32(val)) - case "int64": - typedPropValue := prop.Value.Value.(map[string]interface{}) - typeAsString, ok := typedPropValue["@type"] - if !ok || (typeAsString != "g:Int64" && typeAsString != "g:Int32") { - return vals, ErrorUnexpectedPropertyType - } - var valIf interface{} - if valIf, ok = prop.Value.Value.(map[string]interface{})["@value"]; !ok { - return vals, ErrorUnexpectedPropertyType - } - var val float64 - if val, ok = valIf.(float64); !ok { - return vals, ErrorUnexpectedPropertyType - } - vals = append(vals, int64(val)) - } - } - return -} - -// GetProperty returns the single string value for a given property `key` -// will return an error if the property is not a single string -func (v Vertex) GetProperty(key string) (val string, err error) { - var vals []string - if vals, err = v.GetMultiProperty(key); err != nil { - return - } - if len(vals) == 0 { - err = ErrorPropertyNotFound - return - } - if len(vals) > 1 { - err = ErrorPropertyIsMulti - return - } - return vals[0], nil -} - -// GetPropertyInt64 returns the single int64 value for a given property `key` -// will return an error if the property is not a single string -func (v Vertex) GetPropertyInt64(key string) (val int64, err error) { - var 
valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "int64"); err != nil { - return - } - if len(valsInterface) == 0 { - err = ErrorPropertyNotFound - return - } - if len(valsInterface) > 1 { - err = ErrorPropertyIsMulti - return - } - return valsInterface[0].(int64), nil -} - -// GetPropertyInt32 returns the single int32 value for a given property `key` -// will return an error if the property is not a single string -func (v Vertex) GetPropertyInt32(key string) (val int32, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "int32"); err != nil { - return - } - if len(valsInterface) == 0 { - err = ErrorPropertyNotFound - return - } - if len(valsInterface) > 1 { - err = ErrorPropertyIsMulti - return - } - return valsInterface[0].(int32), nil -} - -// GetPropertyBool returns the single bool value for a given property `key` -// will return an error if the property is not a single string -func (v Vertex) GetPropertyBool(key string) (val bool, err error) { - var valsInterface []interface{} - if valsInterface, err = v.GetMultiPropertyAs(key, "bool"); err != nil { - return - } - if len(valsInterface) == 0 { - err = ErrorPropertyNotFound - return - } - if len(valsInterface) > 1 { - err = ErrorPropertyIsMulti - return - } - return valsInterface[0].(bool), nil -} - -// GetMetaProperty returns a map[string]string for the given property `key` -func (v Vertex) GetMetaProperty(key string) (metaMap map[string][]string, err error) { - var valInterface []VertexProperty - var ok bool - if valInterface, ok = v.Value.Properties[key]; !ok { - err = ErrorPropertyNotFound - return - } - for _, prop := range valInterface { - subKey := prop.Value.Label - var subVal string - if subVal, ok = prop.Value.Value.(string); !ok { - err = ErrorUnexpectedPropertyType - return - } - if metaMap == nil { - metaMap = make(map[string][]string) - } - metaMap[subKey] = append(metaMap[subKey], subVal) - } - return -} diff --git 
a/vendor/github.com/ONSdigital/graphson/validation_utils.go b/vendor/github.com/ONSdigital/graphson/validation_utils.go deleted file mode 100644 index 39d190c4..00000000 --- a/vendor/github.com/ONSdigital/graphson/validation_utils.go +++ /dev/null @@ -1,94 +0,0 @@ -package graphson - -import ( - "fmt" -) - -func EdgesMatch(edge1, edge2 Edge) (bool, string) { - if edge1.Type != edge2.Type { - return false, "type" - } - // if ok, reason := GenericValuesMatch(edge1.Value.ID, edge2.Value.ID); !ok { - if edge1.Value.ID != edge2.Value.ID { - return false, "id" // + reason - } - if edge1.Value.Label != edge2.Value.Label { - return false, "label" - } - // if ok, reason := GenericValuesMatch(edge1.Value.InV, edge2.Value.InV); !ok { - if edge1.Value.InV != edge2.Value.InV { - return false, "inv" // + reason - } - if edge1.Value.InVLabel != edge2.Value.InVLabel { - return false, "invlabel" - } - // if ok, reason := GenericValuesMatch(edge1.Value.OutV, edge2.Value.OutV); !ok { - if edge1.Value.OutV != edge2.Value.OutV { - return false, "outv" // + reason - } - if edge1.Value.OutVLabel != edge2.Value.OutVLabel { - return false, "outvlabel" - } - if len(edge1.Value.Properties) != len(edge2.Value.Properties) { - return false, "properties" - } - for label, edge1Props := range edge1.Value.Properties { - edge2Props := edge2.Value.Properties[label] - if edge1Props.Type != edge2Props.Type { - return false, "prop.type" - } - if edge1Props.Value.Label != edge2Props.Value.Label || - fmt.Sprintf("%v", edge1Props.Value.Label) != fmt.Sprintf("%v", edge2Props.Value.Label) { - return false, "prop.value" - } - } - return true, "" -} - -func VerticesMatch(vertex1, vertex2 Vertex) bool { - if vertex1.Type != vertex2.Type { - return false - } - if vertex1.Value.ID != vertex2.Value.ID { - return false - } - if vertex1.Value.Label != vertex2.Value.Label { - return false - } - if len(vertex1.Value.Properties) != len(vertex2.Value.Properties) { - return false - } - for label, 
vertex1Props := range vertex1.Value.Properties { - vertex2Props := vertex2.Value.Properties[label] - if len(vertex1Props) != len(vertex2Props) { - return false - - } - for i, vertex1PropsElement := range vertex1Props { - vertex2PropsElement := vertex2Props[i] - if vertex1PropsElement.Type != vertex2PropsElement.Type { - return false - } - if vertex1PropsElement.Value.ID.Type != vertex2PropsElement.Value.ID.Type || - fmt.Sprintf("%v", vertex1PropsElement.Value.ID.Value) != fmt.Sprintf("%v", vertex2PropsElement.Value.ID.Value) { - return false - } - if vertex1PropsElement.Value.Label != vertex2PropsElement.Value.Label { - return false - } - if fmt.Sprintf("%v", vertex1PropsElement.Value.Value) != fmt.Sprintf("%v", vertex2PropsElement.Value.Value) { - return false - } - } - } - return true -} - -func GenericValuesMatch(gv1, gv2 GenericValue) (bool, string) { - if gv1.Type != gv2.Type { - return false, "type" - } - gv1ValueString := fmt.Sprintf("%v", gv1.Value) - gv2ValueString := fmt.Sprintf("%v", gv2.Value) - return gv1ValueString == gv2ValueString, "value" -} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/Dockerfile.gremlin b/vendor/github.com/ONSdigital/gremgo-neptune/Dockerfile.gremlin deleted file mode 100644 index 6216e5b0..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/Dockerfile.gremlin +++ /dev/null @@ -1 +0,0 @@ -FROM tinkerpop/gremlin-server \ No newline at end of file diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/Makefile b/vendor/github.com/ONSdigital/gremgo-neptune/Makefile deleted file mode 100644 index 26f49710..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -.DEFAULT_GOAL:= all - -.PHONY: all -all: vet test - -.PHONY: vet -vet: - @go vet -v - -.PHONY: test -test: - @go test -v - -.PHONY: test-bench -test-bench: - @go test -v -bench=. 
-race - -.PHONY: gremlin -gremlin: - @docker build -t gremgo-neptune/gremlin-server -f ./Dockerfile.gremlin . - @docker run -p 8182:8182 -t gremgo-neptune/gremlin-server diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/README.md b/vendor/github.com/ONSdigital/gremgo-neptune/README.md deleted file mode 100644 index 5e55b1b1..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# gremgo-neptune - -[![GoDoc](http://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/ONSdigital/gremgo-neptune) [![Build Status](https://travis-ci.org/ONSdigital/gremgo-neptune.svg?branch=master)](https://travis-ci.org/ONSdigital/gremgo-neptune) [![Go Report Card](https://goreportcard.com/badge/github.com/ONSdigital/gremgo-neptune)](https://goreportcard.com/report/github.com/ONSdigital/gremgo-neptune) - -gremgo-neptune is a fork of [qasaur/gremgo](https://github.com/qasaur/gremgo) with alterations to make it compatible with [AWS Neptune](https://aws.amazon.com/neptune/) which is a "Fast, reliable graph database built for the cloud". - -gremgo is a fast, efficient, and easy-to-use client for the TinkerPop graph database stack. It is a Gremlin language driver which uses WebSockets to interface with Gremlin Server and has a strong emphasis on concurrency and scalability. Please keep in mind that gremgo is still under heavy development and although effort is being made to fully cover gremgo with reliable tests, bugs may be present in several areas. - -**Modifications were made to `gremgo` in order to "support" AWS Neptune's lack of Gremlin-specific features, like no support for query bindings, among others. 
See differences in Gremlin support here: [AWS Neptune Gremlin Implementation Differences](https://docs.aws.amazon.com/neptune/latest/userguide/access-graph-gremlin-differences.html)** - -Installation -========== -``` -go get github.com/ONSdigital/gremgo-neptune -dep ensure -``` - -Development -==== - -If you amend the `dialer` interface, please run: -``` -go generate -``` - -Documentation -========== - -* [GoDoc](https://godoc.org/github.com/ONSdigital/gremgo-neptune) - -Examples - -- [simple example](examples/simple/main.go) -- [cursor example](examples/cursor/main.go) -- [authentication example](examples/authentication/main.go) - - The plugin accepts authentication creating a secure dialer where credentials are set. - If the server needs authentication and you do not provide the credentials the complement will panic. - -License -========== -See [LICENSE](LICENSE.md) diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/TODO.md b/vendor/github.com/ONSdigital/gremgo-neptune/TODO.md deleted file mode 100644 index 756e9586..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/TODO.md +++ /dev/null @@ -1,8 +0,0 @@ -# Todo list for gremgo - -* Add tests for connection (WebSockets etc.) 
-* Timeout for response retrieval -* Fix error handling in write and read workers -* Write UUIDv4 generator to reduce reliance on external library -* Change WebSocket library from gorilla/websocket to net/websocket -* Create mock TinkerPop server for testing diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/client.go b/vendor/github.com/ONSdigital/gremgo-neptune/client.go deleted file mode 100644 index 4e6a4505..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/client.go +++ /dev/null @@ -1,514 +0,0 @@ -package gremgo - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "log" - "reflect" - "sync" - "time" - - "github.com/ONSdigital/graphson" - "github.com/pkg/errors" -) - -var ( - ErrorConnectionDisposed = errors.New("you cannot write on a disposed connection") - ErrorNoGraphTags = errors.New("does not contain any graph tags") - ErrorUnsupportedPropertyType = errors.New("unsupported property map value type") -) - -// Client is a container for the gremgo client. 
-type Client struct { - conn dialer - requests chan []byte - responses chan []byte - results *sync.Map - responseNotifier *sync.Map // responseNotifier notifies the requester that a response has been completed for the request - chunkNotifier *sync.Map // chunkNotifier contains channels per requestID (if using cursors) which notifies the requester that a partial response has arrived - sync.RWMutex - Errored bool -} - -// NewDialer returns a WebSocket dialer to use when connecting to Gremlin Server -func NewDialer(host string, configs ...DialerConfig) (dialer *Ws) { - dialer = &Ws{ - timeout: 15 * time.Second, - pingInterval: 60 * time.Second, - writingWait: 15 * time.Second, - readingWait: 15 * time.Second, - connected: false, - quit: make(chan struct{}), - } - - for _, conf := range configs { - conf(dialer) - } - - dialer.host = host - return dialer -} - -func newClient() (c Client) { - c.requests = make(chan []byte, 3) // c.requests takes any request and delivers it to the WriteWorker for dispatch to Gremlin Server - c.responses = make(chan []byte, 3) // c.responses takes raw responses from ReadWorker and delivers it for sorting to handleResponse - c.results = &sync.Map{} - c.responseNotifier = &sync.Map{} - c.chunkNotifier = &sync.Map{} - return -} - -// Dial returns a gremgo client for interaction with the Gremlin Server specified in the host IP. -func Dial(conn dialer, errs chan error) (c Client, err error) { - return DialCtx(context.Background(), conn, errs) -} - -// DialCtx returns a gremgo client for interaction with the Gremlin Server specified in the host IP. 
-func DialCtx(ctx context.Context, conn dialer, errs chan error) (c Client, err error) { - c = newClient() - c.conn = conn - - // Connects to Gremlin Server - err = conn.connectCtx(ctx) - if err != nil { - return - } - - msgChan := make(chan []byte, 200) - - go c.writeWorkerCtx(ctx, errs) - go c.readWorkerCtx(ctx, msgChan, errs) - go c.saveWorkerCtx(ctx, msgChan, errs) - go conn.pingCtx(ctx, errs) - - return -} - -func (c *Client) executeRequest(query string, bindings, rebindings map[string]string) (resp []Response, err error) { - return c.executeRequestCtx(context.Background(), query, bindings, rebindings) -} -func (c *Client) executeRequestCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (resp []Response, err error) { - var req request - var id string - req, id, err = prepareRequest(query, bindings, rebindings) - if err != nil { - return - } - - msg, err := packageRequest(req) - if err != nil { - log.Println(err) - return - } - c.responseNotifier.Store(id, make(chan error, 1)) - c.dispatchRequestCtx(ctx, msg) - resp, err = c.retrieveResponseCtx(ctx, id) - if err != nil { - err = errors.Wrapf(err, "query: %s", query) - } - return -} -func (c *Client) executeRequestCursorCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (cursor *Cursor, err error) { - var req request - var id string - if req, id, err = prepareRequest(query, bindings, rebindings); err != nil { - return - } - - var msg []byte - if msg, err = packageRequest(req); err != nil { - log.Println(err) - return - } - c.responseNotifier.Store(id, make(chan error, 1)) - c.chunkNotifier.Store(id, make(chan bool, 10)) - if c.dispatchRequestCtx(ctx, msg); err != nil { - err = errors.Wrap(err, "executeRequestCursorCtx") - return - } - - cursor = &Cursor{ - ID: id, - } - return -} - -func (c *Client) authenticate(requestID string) (err error) { - auth := c.conn.getAuth() - req, err := prepareAuthRequest(requestID, auth.username, auth.password) - if err != 
nil { - return - } - - msg, err := packageRequest(req) - if err != nil { - log.Println(err) - return - } - - c.dispatchRequest(msg) - return -} - -// Execute formats a raw Gremlin query, sends it to Gremlin Server, and returns the result. -func (c *Client) Execute(query string, bindings, rebindings map[string]string) (resp []Response, err error) { - return c.ExecuteCtx(context.Background(), query, bindings, rebindings) -} -func (c *Client) ExecuteCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (resp []Response, err error) { - if c.conn.IsDisposed() { - return resp, ErrorConnectionDisposed - } - return c.executeRequestCtx(ctx, query, bindings, rebindings) -} - -// ExecuteFile takes a file path to a Gremlin script, sends it to Gremlin Server, and returns the result. -func (c *Client) ExecuteFile(path string, bindings, rebindings map[string]string) (resp []Response, err error) { - if c.conn.IsDisposed() { - return resp, ErrorConnectionDisposed - } - d, err := ioutil.ReadFile(path) // Read script from file - if err != nil { - log.Println(err) - return - } - query := string(d) - return c.executeRequest(query, bindings, rebindings) -} - -// Get formats a raw Gremlin query, sends it to Gremlin Server, and populates the passed []interface. 
-func (c *Client) Get(query string, bindings, rebindings map[string]string) (res []graphson.Vertex, err error) { - return c.GetCtx(context.Background(), query, bindings, rebindings) -} - -// GetCtx - execute a gremlin command and return the response as vertices -func (c *Client) GetCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (res []graphson.Vertex, err error) { - if c.conn.IsDisposed() { - err = ErrorConnectionDisposed - return - } - - var resp []Response - resp, err = c.executeRequestCtx(ctx, query, bindings, rebindings) - if err != nil { - return - } - return c.deserializeResponseToVertices(resp) -} - -func (c *Client) deserializeResponseToVertices(resp []Response) (res []graphson.Vertex, err error) { - if len(resp) == 0 || resp[0].Status.Code == StatusNoContent { - return - } - - for _, item := range resp { - resN, err := graphson.DeserializeListOfVerticesFromBytes(item.Result.Data) - if err != nil { - panic(err) - } - res = append(res, resN...) - } - return -} - -// OpenStreamCursor initiates a query on the database, returning a stream cursor used to iterate over the results as they arrive. -// The provided query must only return a string list, as the Read() function on Stream explicitly handles string values. -func (c *Client) OpenStreamCursor(ctx context.Context, query string, bindings, rebindings map[string]string) (*Stream, error) { - if c.conn.IsDisposed() { - return nil, ErrorConnectionDisposed - } - basicCursor, err := c.executeRequestCursorCtx(ctx, query, bindings, rebindings) - return &Stream{ - cursor: basicCursor, - client: c, - }, err -} - -// OpenCursorCtx initiates a query on the database, returning a cursor used to iterate over the results as they arrive. -// The provided query must return a vertex or list of vertices in order for ReadCursorCtx to correctly format the results. 
-func (c *Client) OpenCursorCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (cursor *Cursor, err error) { - if c.conn.IsDisposed() { - err = ErrorConnectionDisposed - return - } - return c.executeRequestCursorCtx(ctx, query, bindings, rebindings) -} - -// ReadCursorCtx returns the next set of results, deserialized as []Vertex, for the cursor -// - `res` may be empty when results were read by a previous call -// - `eof` will be true when no more results are available -func (c *Client) ReadCursorCtx(ctx context.Context, cursor *Cursor) (res []graphson.Vertex, eof bool, err error) { - var resp []Response - if resp, eof, err = c.retrieveNextResponseCtx(ctx, cursor); err != nil { - err = errors.Wrapf(err, "ReadCursorCtx: %s", cursor.ID) - return - } - - if res, err = c.deserializeResponseToVertices(resp); err != nil { - err = errors.Wrapf(err, "ReadCursorCtx: %s", cursor.ID) - return - } - return -} - -// GetE formats a raw Gremlin query, sends it to Gremlin Server, and populates the passed []interface. -func (c *Client) GetE(query string, bindings, rebindings map[string]string) (res []graphson.Edge, err error) { - return c.GetEdgeCtx(context.Background(), query, bindings, rebindings) -} -func (c *Client) GetEdgeCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (res []graphson.Edge, err error) { - if c.conn.IsDisposed() { - err = ErrorConnectionDisposed - return - } - - resp, err := c.executeRequestCtx(ctx, query, bindings, rebindings) - if err != nil { - return - } - if len(resp) == 0 || resp[0].Status.Code == StatusNoContent { - return - } - - for _, item := range resp { - var resN []graphson.Edge - if resN, err = graphson.DeserializeListOfEdgesFromBytes(item.Result.Data); err != nil { - return - } - res = append(res, resN...) 
- } - return -} - -// GetCount returns the count element returned by an Execute() -func (c *Client) GetCount(query string, bindings, rebindings map[string]string) (i int64, err error) { - return c.GetCountCtx(context.Background(), query, bindings, rebindings) -} -func (c *Client) GetCountCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (i int64, err error) { - var res []Response - if res, err = c.ExecuteCtx(ctx, query, bindings, rebindings); err != nil { - return - } - if len(res) > 1 { - err = errors.New("GetCount: expected one result, got more than one") - return - } else if len(res) == 0 { - err = errors.New("GetCount: expected one result, got zero") - return - } - if i, err = graphson.DeserializeNumber(res[0].Result.Data); err != nil { - return - } - return -} - -// GetStringList returns the list of string elements returned by an Execute() (e.g. from `...().properties('p').value()`) -func (c *Client) GetStringList(query string, bindings, rebindings map[string]string) (vals []string, err error) { - return c.GetStringListCtx(context.Background(), query, bindings, rebindings) -} -func (c *Client) GetStringListCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (vals []string, err error) { - var res []Response - if res, err = c.ExecuteCtx(ctx, query, bindings, rebindings); err != nil { - return - } - for _, resN := range res { - var valsN []string - if valsN, err = graphson.DeserializeStringListFromBytes(resN.Result.Data); err != nil { - return - } - vals = append(vals, valsN...) 
- } - return -} - -// GetProperties returns a map of string to interface{} returned by an Execute() for vertex .properties() -func (c *Client) GetProperties(query string, bindings, rebindings map[string]string) (vals map[string][]interface{}, err error) { - return c.GetPropertiesCtx(context.Background(), query, bindings, rebindings) -} -func (c *Client) GetPropertiesCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (vals map[string][]interface{}, err error) { - var res []Response - if res, err = c.ExecuteCtx(ctx, query, bindings, rebindings); err != nil { - return - } - vals = make(map[string][]interface{}) - for _, resN := range res { - if err = graphson.DeserializePropertiesFromBytes(resN.Result.Data, vals); err != nil { - return - } - } - return -} - -// GremlinForVertex returns the addV()... and V()... gremlin commands for `data` -// Because of possible multiples, it does not start with `g.` (it probably should? XXX ) -// (largely taken from https://github.com/intwinelabs/gremgoser) -func GremlinForVertex(label string, data interface{}) (gremAdd, gremGet string, err error) { - gremAdd = fmt.Sprintf("addV('%s')", label) - gremGet = fmt.Sprintf("V('%s')", label) - - d := reflect.ValueOf(data) - id := d.FieldByName("Id") - if id.IsValid() { - if idField, ok := d.Type().FieldByName("Id"); ok { - tag := idField.Tag.Get("graph") - name, opts := parseTag(tag) - if len(name) == 0 && len(opts) == 0 { - gremAdd += fmt.Sprintf(".property(id,'%s')", id) - gremGet += fmt.Sprintf(".hasId('%s')", id) - } - } - } - - missingTag := true - - for i := 0; i < d.NumField(); i++ { - tag := d.Type().Field(i).Tag.Get("graph") - name, opts := parseTag(tag) - if (len(name) == 0 || name == "-") && len(opts) == 0 { - continue - } - missingTag = false - val := d.Field(i).Interface() - if len(opts) == 0 { - err = fmt.Errorf("interface field tag %q does not contain a tag option type, field type: %T", name, val) - return - } - if !d.Field(i).IsValid() { - 
continue - } - if opts.Contains("id") { - if val != "" { - gremAdd += fmt.Sprintf(".property(id,'%s')", val) - gremGet += fmt.Sprintf(".hasId('%s')", val) - } - } else if opts.Contains("string") { - if val != "" { - gremAdd += fmt.Sprintf(".property('%s','%s')", name, escapeStringy(val.(string))) - gremGet += fmt.Sprintf(".has('%s','%s')", name, escapeStringy(val)) - } - } else if opts.Contains("bool") || opts.Contains("number") || opts.Contains("other") { - gremAdd += fmt.Sprintf(".property('%s',%v)", name, val) - gremGet += fmt.Sprintf(".has('%s',%v)", name, val) - } else if opts.Contains("[]string") { - s := reflect.ValueOf(val) - for i := 0; i < s.Len(); i++ { - gremAdd += fmt.Sprintf(".property('%s','%s')", name, escapeStringy(s.Index(i).Interface())) - gremGet += fmt.Sprintf(".has('%s','%s')", name, escapeStringy(s.Index(i).Interface())) - } - } else if opts.Contains("[]bool") || opts.Contains("[]number") || opts.Contains("[]other") { - s := reflect.ValueOf(val) - for i := 0; i < s.Len(); i++ { - gremAdd += fmt.Sprintf(".property('%s',%v)", name, s.Index(i).Interface()) - gremGet += fmt.Sprintf(".has('%s',%v)", name, s.Index(i).Interface()) - } - } else { - err = fmt.Errorf("interface field tag needs recognised option, field: %q, tag: %q", d.Type().Field(i).Name, tag) - return - } - } - - if missingTag { - // this err is effectively a warning for gremGet (can be ignored, unless no Id) - err = ErrorNoGraphTags - return - } - return -} - -// AddV takes a label and an interface and adds it as a vertex to the graph -func (c *Client) AddV(label string, data interface{}, bindings, rebindings map[string]string) (vert graphson.Vertex, err error) { - return c.AddVertexCtx(context.Background(), label, data, bindings, rebindings) -} -func (c *Client) AddVertexCtx(ctx context.Context, label string, data interface{}, bindings, rebindings map[string]string) (vert graphson.Vertex, err error) { - if c.conn.IsDisposed() { - return vert, ErrorConnectionDisposed - } - - q, _, 
err := GremlinForVertex(label, data) - if err != nil && err != ErrorNoGraphTags { - panic(err) // XXX - } - q = "g." + q - - var resp []Response - if resp, err = c.ExecuteCtx(ctx, q, bindings, rebindings); err != nil { - return - } - - if len(resp) != 1 { - return vert, fmt.Errorf("AddV should receive 1 response, got %d", len(resp)) - } - - for _, res := range resp { // XXX one result, so should not need loop - var result []graphson.Vertex - if result, err = graphson.DeserializeListOfVerticesFromBytes(res.Result.Data); err != nil { - return - } - if len(result) != 1 { - return vert, fmt.Errorf("AddV should receive 1 vertex, got %d", len(result)) - } - - vert = result[0] - } - return -} - -// AddE takes a label, from UUID and to UUID (and optional props map) and creates an edge between the two vertex in the graph -func (c *Client) AddE(label, fromId, toId string, props map[string]interface{}) (resp interface{}, err error) { - return c.AddEdgeCtx(context.Background(), label, fromId, toId, props) -} -func (c *Client) AddEdgeCtx(ctx context.Context, label, fromId, toId string, props map[string]interface{}) (resp interface{}, err error) { - if c.conn.IsDisposed() { - return nil, ErrorConnectionDisposed - } - - var propStr string - if propStr, err = buildProps(props); err != nil { - return - } - q := fmt.Sprintf("g.addE('%s').from(g.V().hasId('%s')).to(g.V().hasId('%s'))%s", label, fromId, toId, propStr) - resp, err = c.ExecuteCtx(ctx, q, nil, nil) - return -} - -// Close closes the underlying connection and marks the client as closed. 
-func (c *Client) Close() { - if c.conn != nil { - c.conn.close() - } -} - -// buildProps converts a map[string]interfaces to be used as properties on an edge -// (largely taken from https://github.com/intwinelabs/gremgoser) -func buildProps(props map[string]interface{}) (q string, err error) { - for k, v := range props { - t := reflect.ValueOf(v).Kind() - if t == reflect.String { - q += fmt.Sprintf(".property('%s', '%s')", k, escapeStringy(v)) - } else if t == reflect.Bool || t == reflect.Int || t == reflect.Int8 || t == reflect.Int16 || t == reflect.Int32 || t == reflect.Int64 || t == reflect.Uint || t == reflect.Uint8 || t == reflect.Uint16 || t == reflect.Uint32 || t == reflect.Uint64 || t == reflect.Float32 || t == reflect.Float64 { - q += fmt.Sprintf(".property('%s', %v)", k, v) - } else if t == reflect.Slice { - s := reflect.ValueOf(v) - for i := 0; i < s.Len(); i++ { - q += fmt.Sprintf(".property('%s', '%s')", k, escapeStringy(s.Index(i).Interface())) - } - } else { - return "", ErrorUnsupportedPropertyType - } - } - return -} - -// escapeStringy takes a string and escapes some characters -// (largely taken from https://github.com/intwinelabs/gremgoser) -func escapeStringy(stringy interface{}) string { - var buf bytes.Buffer - for _, char := range stringy.(string) { - switch char { - case '\'', '"', '\\': - buf.WriteRune('\\') - } - buf.WriteRune(char) - } - return buf.String() -} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/configuration.go b/vendor/github.com/ONSdigital/gremgo-neptune/configuration.go deleted file mode 100644 index 8ed566d9..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/configuration.go +++ /dev/null @@ -1,42 +0,0 @@ -package gremgo - -import "time" - -//DialerConfig is the struct for defining configuration for WebSocket dialer -type DialerConfig func(*Ws) - -//SetAuthentication sets on dialer credentials for authentication -func SetAuthentication(username string, password 
string) DialerConfig { - return func(c *Ws) { - c.auth = &auth{username: username, password: password} - } -} - -//SetTimeout sets the dial timeout -func SetTimeout(seconds int) DialerConfig { - return func(c *Ws) { - c.timeout = time.Duration(seconds) * time.Second - } -} - -//SetPingInterval sets the interval of ping sending for know is -//connection is alive and in consequence the client is connected -func SetPingInterval(seconds int) DialerConfig { - return func(c *Ws) { - c.pingInterval = time.Duration(seconds) * time.Second - } -} - -//SetWritingWait sets the time for waiting that writing occur -func SetWritingWait(seconds int) DialerConfig { - return func(c *Ws) { - c.writingWait = time.Duration(seconds) * time.Second - } -} - -//SetReadingWait sets the time for waiting that reading occur -func SetReadingWait(seconds int) DialerConfig { - return func(c *Ws) { - c.readingWait = time.Duration(seconds) * time.Second - } -} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/connection.go b/vendor/github.com/ONSdigital/gremgo-neptune/connection.go deleted file mode 100644 index d2d16ca4..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/connection.go +++ /dev/null @@ -1,270 +0,0 @@ -package gremgo - -import ( - "context" - "net/http" - "sync" - "time" - - "github.com/gorilla/websocket" - "github.com/pkg/errors" -) - -//go:generate moq -out dialer_moq_test.go . 
dialer - -type dialer interface { - connect() error - connectCtx(context.Context) error - IsConnected() bool - IsDisposed() bool - write([]byte) error - read() (int, []byte, error) - readCtx(context.Context, chan message) - close() error - getAuth() *auth - ping(errs chan error) - pingCtx(context.Context, chan error) -} - -///// -/* -WebSocket Connection -*/ -///// - -// Ws is the dialer for a WebSocket connection -type Ws struct { - host string - conn *websocket.Conn - auth *auth - disposed bool - connected bool - pingInterval time.Duration - writingWait time.Duration - readingWait time.Duration - timeout time.Duration - quit chan struct{} - sync.RWMutex -} - -//Auth is the container for authentication data of dialer -type auth struct { - username string - password string -} - -func (ws *Ws) connect() (err error) { - return ws.connectCtx(context.Background()) -} - -func (ws *Ws) connectCtx(ctx context.Context) (err error) { - d := websocket.Dialer{ - WriteBufferSize: 512 * 1024, - ReadBufferSize: 512 * 1024, - HandshakeTimeout: 5 * time.Second, // Timeout or else we'll hang forever and never fail on bad hosts. - } - ws.conn, _, err = d.DialContext(ctx, ws.host, http.Header{}) - if err != nil { - return - } - ws.connected = true - ws.conn.SetPongHandler(func(appData string) error { - ws.Lock() - ws.connected = true - ws.Unlock() - return nil - }) - return -} - -// IsConnected returns whether the underlying websocket is connected -func (ws *Ws) IsConnected() bool { - return ws.connected -} - -// IsDisposed returns whether the underlying websocket is disposed -func (ws *Ws) IsDisposed() bool { - return ws.disposed -} - -func (ws *Ws) write(msg []byte) (err error) { - // XXX want to do locking here? - // ws.RWMutex.Lock() - // defer ws.RWMutex.Unlock() - err = ws.conn.WriteMessage(websocket.BinaryMessage, msg) - return -} - -func (ws *Ws) read() (msgType int, msg []byte, err error) { - // XXX want to do locking here? 
- // ws.RWMutex.RLock() - // defer ws.RWMutex.RUnlock() - msgType, msg, err = ws.conn.ReadMessage() - return -} - -func (ws *Ws) readCtx(ctx context.Context, rxMsgChan chan message) { - // XXX want to do locking here? - // ws.RWMutex.RLock() - // defer ws.RWMutex.RUnlock() - for { - select { - case <-ctx.Done(): - return - default: - msgType, msg, err := ws.conn.ReadMessage() - rxMsgChan <- message{msgType, msg, err} - if msgType == -1 { - return - } - } - } -} - -func (ws *Ws) close() (err error) { - defer func() { - close(ws.quit) - ws.conn.Close() - ws.disposed = true - }() - - // XXX want to do locking here? - // ws.RWMutex.Lock() - // defer ws.RWMutex.Unlock() - err = ws.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) //Cleanly close the connection with the server - return -} - -func (ws *Ws) getAuth() *auth { - if ws.auth == nil { - panic("You must create a Secure Dialer for authenticating with the server") - } - return ws.auth -} - -func (ws *Ws) ping(errs chan error) { - ws.pingCtx(context.Background(), errs) -} - -func (ws *Ws) pingCtx(ctx context.Context, errs chan error) { - ticker := time.NewTicker(ws.pingInterval) - defer ticker.Stop() - for { - select { - case <-ticker.C: - connected := true - if err := ws.conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(ws.writingWait)); err != nil { - errs <- err - connected = false - } - ws.Lock() - ws.connected = connected - ws.Unlock() - case <-ctx.Done(): - return - case <-ws.quit: - return - } - } -} - -// writeWorker works on a loop and dispatches messages as soon as it receives them -func (c *Client) writeWorker(errs chan error, quit chan struct{}) { - for { - select { - case msg := <-c.requests: - c.Lock() - err := c.conn.write(msg) - if err != nil { - errs <- err - c.Errored = true - c.Unlock() - break - } - c.Unlock() - - case <-quit: - return - } - } -} - -// writeWorkerCtx works on a loop and dispatches messages as soon as it 
receives them -func (c *Client) writeWorkerCtx(ctx context.Context, errs chan error) { - for { - select { - case msg := <-c.requests: - c.Lock() - err := c.conn.write(msg) - if err != nil { - errs <- err - c.Errored = true - c.Unlock() - break - } - c.Unlock() - - case <-ctx.Done(): - return - } - } -} - -func (c *Client) readWorker(errs chan error, quit chan struct{}) { // readWorker works on a loop and sorts messages as soon as it receives them - for { - msgType, msg, err := c.conn.read() - if msgType == -1 { // msgType == -1 is noFrame (close connection) - return - } - if err != nil { - errs <- errors.Wrapf(err, "Receive message type: %d", msgType) - c.Errored = true - break - } - if msg != nil { - if err = c.handleResponse(msg); err != nil { - // XXX this makes the err fatal - errs <- errors.Wrapf(err, "handleResponse fail: %q", msg) - c.Errored = true - } - } - - select { - case <-quit: - return - default: - continue - } - } -} - -type message struct { - mType int - msg []byte - err error -} - -// readWorkerCtx works on a loop and sorts read messages as soon as it receives them -func (c *Client) readWorkerCtx(ctx context.Context, msgs chan []byte, errs chan error) { - receivedMsgChan := make(chan message, 100) - go c.conn.readCtx(ctx, receivedMsgChan) - - for i := 0; ; i++ { - select { - case <-ctx.Done(): - return - case msg := <-receivedMsgChan: - if msg.mType == -1 { // msgType == -1 is noFrame (close connection) - return - } - if msg.err != nil { - errs <- errors.Wrapf(msg.err, "Receive message type: %d", msg.mType) - c.Errored = true - return - } - if msg.msg != nil { - msgs <- msg.msg - } - } - } -} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/cursor.go b/vendor/github.com/ONSdigital/gremgo-neptune/cursor.go deleted file mode 100644 index 097850c1..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/cursor.go +++ /dev/null @@ -1,86 +0,0 @@ -package gremgo - -import ( - "context" - "io" - "net/http" - - 
"github.com/ONSdigital/graphson" - "github.com/pkg/errors" -) - -// Cursor allows for results to be iterated over as soon as available, rather than waiting for -// a query to complete and all results to be returned in one block. -type Cursor struct { - ID string -} - -// Stream is a specific implementation of a Cursor, which iterates over results from a cursor but -// only works on queries which return a list of strings. This is designed for returning what would -// be considered 'rows' of data in other contexts. -type Stream struct { - cursor *Cursor - eof bool - buffer []string - client *Client -} - -// Read a string response from the stream cursor, reading from the buffer of previously retrieved responses -// when possible. When the buffer is empty, Read uses the stream's client to retrieve further -// responses from the database. -func (s *Stream) Read() (string, error) { - if len(s.buffer) == 0 { - if s.eof { - return "", io.EOF - } - - if err := s.refillBuffer(); err != nil { - return "", err - } - } - - var row string - row, s.buffer = s.buffer[0], s.buffer[1:] - row += "\n" - - return row, nil - -} - -func (s *Stream) refillBuffer() error { - var resp []Response - var err error - - for resp == nil && !s.eof { //resp could be empty if reading too quickly - if resp, s.eof, err = s.client.retrieveNextResponseCtx(context.Background(), s.cursor); err != nil { - return errors.Wrapf(err, "cursor.Read: %s", s.cursor.ID) - } - - if len(resp) > 1 { - return errors.New("too many results in cursor response") - } - - //gremlin has returned a validly formed 'no content' response - if len(resp) == 1 && &resp[0].Status != nil && resp[0].Status.Code == http.StatusNoContent { - s.eof = true - return io.EOF - } - } - - if s.buffer, err = graphson.DeserializeStringListFromBytes(resp[0].Result.Data); err != nil { - return err - } - - if len(s.buffer) == 0 { - return errors.New("no results deserialized") - } - - return nil -} - -// Close satisfies the Closer 
interface. The stream does not need to close any -// resources, as the contained client holds the connection and is responsible -// for closing its own resources. -func (s *Stream) Close(ctx context.Context) error { - return nil -} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/go.mod b/vendor/github.com/ONSdigital/gremgo-neptune/go.mod deleted file mode 100644 index 2ef7ab83..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/ONSdigital/gremgo-neptune - -require ( - github.com/ONSdigital/graphson v0.0.0-20190718134034-c13ceacd109d - github.com/gofrs/uuid v3.2.0+incompatible - github.com/gorilla/websocket v1.4.0 - github.com/pkg/errors v0.8.1 - github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a -) diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/go.sum b/vendor/github.com/ONSdigital/gremgo-neptune/go.sum deleted file mode 100644 index c16ab732..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/go.sum +++ /dev/null @@ -1,25 +0,0 @@ -github.com/ONSdigital/graphson v0.0.0-20190531092426-d39cb8fe4384 h1:tQOaBPntKLKJZYNTqT6YwE9fXZZLD0jBrke18nAJV5w= -github.com/ONSdigital/graphson v0.0.0-20190531092426-d39cb8fe4384/go.mod h1:zQ+8pTnCLGuy4eUek81pWUxZo4/f71ri3VYz97Wby+4= -github.com/ONSdigital/graphson v0.0.0-20190717101729-324718b3a644 h1:qlXGwzq+X2DUd0iOYmkXwnSxYDeU1efFwp7sUXASjO0= -github.com/ONSdigital/graphson v0.0.0-20190717101729-324718b3a644/go.mod h1:zQ+8pTnCLGuy4eUek81pWUxZo4/f71ri3VYz97Wby+4= -github.com/ONSdigital/graphson v0.0.0-20190718134034-c13ceacd109d h1:yrCtEGlohmA3OnXtke0nOOp/m9O83orpSnTGOfYOw1Q= -github.com/ONSdigital/graphson v0.0.0-20190718134034-c13ceacd109d/go.mod h1:zQ+8pTnCLGuy4eUek81pWUxZo4/f71ri3VYz97Wby+4= -github.com/gofrs/uuid v3.2.0+incompatible 
h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/pool.go b/vendor/github.com/ONSdigital/gremgo-neptune/pool.go deleted file mode 100644 index 41807159..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/pool.go +++ /dev/null @@ -1,535 +0,0 @@ -package gremgo - -import ( - "context" - "io/ioutil" - "log" - "sync" - "time" - - "github.com/ONSdigital/graphson" - "github.com/pkg/errors" -) - -const connRequestQueueSize = 1000000 - -// errors -var ( - ErrGraphDBClosed = errors.New("graphdb is closed") - ErrBadConn = errors.New("bad conn") -) - -// Pool maintains a list of connections. -type Pool struct { - MaxOpen int - MaxLifetime time.Duration - dial func() (*Client, error) - mu sync.Mutex - freeConns []*conn - open int - openerCh chan struct{} - connRequests map[uint64]chan connRequest - nextRequest uint64 - cleanerCh chan struct{} - closed bool -} - -// NewPool create ConnectionPool -func NewPool(dial func() (*Client, error)) *Pool { - p := new(Pool) - p.dial = dial - p.openerCh = make(chan struct{}, connRequestQueueSize) - p.connRequests = make(map[uint64]chan connRequest) - - go p.opener() - - return p -} - -// NewPoolWithDialerCtx returns a NewPool that uses a contextual dialer to dbURL, -// errs is a chan that receives any errors from the ping/read/write workers for the connection -func NewPoolWithDialerCtx(ctx context.Context, dbURL string, errs chan error, cfgs ...DialerConfig) *Pool { - dialFunc := func() (*Client, error) { - dialer := NewDialer(dbURL, cfgs...) - cli, err := DialCtx(ctx, dialer, errs) - return &cli, err - } - return NewPool(dialFunc) -} - -type connRequest struct { - *conn - err error -} - -// conn represents a shared and reusable connection. 
-type conn struct { - Pool *Pool - Client *Client - t time.Time -} - -// maybeOpenNewConnections initiates new connections if capacity allows (must be locked) -func (p *Pool) maybeOpenNewConnections() { - if p.closed { - return - } - numRequests := len(p.connRequests) - if p.MaxOpen > 0 { - numCanOpen := p.MaxOpen - p.open - if numRequests > numCanOpen { - numRequests = numCanOpen - } - } - for numRequests > 0 { - p.open++ - numRequests-- - p.openerCh <- struct{}{} - } -} - -func (p *Pool) opener() { - for range p.openerCh { - if err := p.openNewConnection(); err != nil { - // gutil.WarnLev(1, "failed opener "+err.Error()) XXX - } - } -} - -type so struct { - tryOpening bool - alreadyLocked bool - conn *conn -} - -// subtractOpen reduces p.open (count), unlocks. Optionally: locks, maybeOpenNewConnections, conn.Client.Close -func (p *Pool) subtractOpen(opts so, err error) error { - if !opts.alreadyLocked { - p.mu.Lock() - } - p.open-- - if opts.tryOpening { - p.maybeOpenNewConnections() - } - p.mu.Unlock() - if opts.conn != nil { - opts.conn.Client.Close() - } - return err -} - -func (p *Pool) openNewConnection() (err error) { - if p.closed { - return p.subtractOpen(so{}, errors.Errorf("failed to openNewConnection - pool closed")) - } - var c *Client - c, err = p.dial() - if err != nil { - return p.subtractOpen(so{tryOpening: true}, errors.Wrapf(err, "failed to openNewConnection - dial")) - } - cn := &conn{ - Pool: p, - Client: c, - t: time.Now(), - } - p.mu.Lock() - if !p.putConnLocked(cn, nil) { - return p.subtractOpen(so{alreadyLocked: true, conn: cn}, errors.Errorf("failed to openNewConnection - connLocked")) - } - p.mu.Unlock() - return -} - -// putConn releases a connection back to the connection pool. 
-func (p *Pool) putConn(cn *conn, err error) error { - p.mu.Lock() - if !p.putConnLocked(cn, err) { - return p.subtractOpen(so{alreadyLocked: true, conn: cn}, err) - } - p.mu.Unlock() - return err -} - -// putConnLocked releases a connection back to the connection pool (must be locked) -// returns false when unable to do so (pool is closed, open is at max) -func (p *Pool) putConnLocked(cn *conn, err error) bool { - if p.closed { - return false - } - if p.MaxOpen > 0 && p.open >= p.MaxOpen { - return false - } - if len(p.connRequests) > 0 { - var req chan connRequest - var reqKey uint64 - for reqKey, req = range p.connRequests { - break - } - delete(p.connRequests, reqKey) - req <- connRequest{ - conn: cn, - err: err, - } - } else { - p.freeConns = append(p.freeConns, cn) - p.startCleanerLocked() - } - return true -} - -// conn will return an available pooled connection. Either an idle connection or -// by dialing a new one if the pool does not currently have a maximum number -// of active connections. -func (p *Pool) conn() (*conn, error) { - ctx := context.Background() - return p.connCtx(ctx) -} -func (p *Pool) connCtx(ctx context.Context) (*conn, error) { - cn, err := p._conn(ctx, true) - if err == nil { - return cn, nil - } - if errors.Cause(err) == ErrBadConn { - return p._conn(ctx, false) - } - return cn, err -} - -func (p *Pool) _conn(ctx context.Context, useFreeConn bool) (*conn, error) { - p.mu.Lock() - if p.closed { - p.mu.Unlock() - return nil, ErrGraphDBClosed - } - // Check if the context is expired. 
- select { - default: - case <-ctx.Done(): - p.mu.Unlock() - return nil, errors.Wrap(ctx.Err(), "the context is expired") - } - - var pc *conn - numFree := len(p.freeConns) - if useFreeConn && numFree > 0 { - pc = p.freeConns[0] - copy(p.freeConns, p.freeConns[1:]) - p.freeConns = p.freeConns[:numFree-1] - p.mu.Unlock() - if pc.expired(p.MaxLifetime) { - return nil, p.subtractOpen(so{conn: pc}, ErrBadConn) - } - return pc, nil - } - - if p.MaxOpen > 0 && p.MaxOpen <= p.open { - req := make(chan connRequest, 1) - reqKey := p.nextRequest - p.nextRequest++ - p.connRequests[reqKey] = req - p.mu.Unlock() - - select { - // timeout - case <-ctx.Done(): - // Remove the connection request and ensure no value has been sent - // on it after removing. - p.mu.Lock() - delete(p.connRequests, reqKey) - p.mu.Unlock() - select { - case ret, ok := <-req: - if ok { - p.putConn(ret.conn, ret.err) - } - default: - } - return nil, errors.Wrap(ctx.Err(), "Deadline of connRequests exceeded") - case ret, ok := <-req: - if !ok { - return nil, ErrGraphDBClosed - } - if ret.err != nil { - return ret.conn, errors.Wrap(ret.err, "Response has an error") - } - return ret.conn, nil - } - } - - p.open++ - p.mu.Unlock() - newCn, err := p.dial() - if err != nil { - return nil, p.subtractOpen(so{tryOpening: true}, errors.Wrap(err, "Failed newConn")) - } - return &conn{ - Pool: p, - Client: newCn, - t: time.Now(), - }, nil -} - -func (p *Pool) needStartCleaner() bool { - return p.MaxLifetime > 0 && - p.open > 0 && - p.cleanerCh == nil -} - -// startCleanerLocked starts connectionCleaner if needed. -func (p *Pool) startCleanerLocked() { - if p.needStartCleaner() { - p.cleanerCh = make(chan struct{}, 1) - go p.connectionCleaner() - } -} - -func (p *Pool) connectionCleaner() { - const minInterval = time.Second - - d := p.MaxLifetime - if d < minInterval { - d = minInterval - } - t := time.NewTimer(d) - - for { - select { - case <-t.C: - case <-p.cleanerCh: // dbclient was closed. 
- } - - ml := p.MaxLifetime - p.mu.Lock() - if p.closed || len(p.freeConns) == 0 || ml <= 0 { - p.cleanerCh = nil - p.mu.Unlock() - return - } - n := time.Now() - mlExpiredSince := n.Add(-ml) - var closing []*conn - for i := 0; i < len(p.freeConns); i++ { - pc := p.freeConns[i] - if (ml > 0 && pc.t.Before(mlExpiredSince)) || - pc.Client.Errored { - p.open-- - closing = append(closing, pc) - last := len(p.freeConns) - 1 - p.freeConns[i] = p.freeConns[last] - p.freeConns[last] = nil - p.freeConns = p.freeConns[:last] - i-- - } - } - p.mu.Unlock() - - for _, pc := range closing { - if pc.Client != nil { - pc.Client.Close() - } - } - - t.Reset(d) - } -} - -// Execute formats a raw Gremlin query, sends it to Gremlin Server, and returns the result. -func (p *Pool) Execute(query string, bindings, rebindings map[string]string) (resp []Response, err error) { - return p.ExecuteCtx(context.Background(), query, bindings, rebindings) -} -func (p *Pool) ExecuteCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (resp []Response, err error) { - pc, err := p.conn() - if err != nil { - return resp, errors.Wrap(err, "Failed p.conn") - } - defer func() { - p.putConn(pc, err) - }() - resp, err = pc.Client.executeRequestCtx(ctx, query, bindings, rebindings) - return -} - -// ExecuteFile takes a file path to a Gremlin script, sends it to Gremlin Server, and returns the result. 
-func (p *Pool) ExecuteFile(path string, bindings, rebindings map[string]string) (resp []Response, err error) { - pc, err := p.conn() - if err != nil { - return resp, errors.Wrap(err, "Failed p.conn") - } - defer func() { - p.putConn(pc, err) - }() - d, err := ioutil.ReadFile(path) // Read script from file - if err != nil { - log.Println(err) - return - } - query := string(d) - resp, err = pc.Client.executeRequest(query, bindings, rebindings) - return -} - -// AddV -func (p *Pool) AddV(label string, i interface{}, bindings, rebindings map[string]string) (resp graphson.Vertex, err error) { - return p.AddVertexCtx(context.Background(), label, i, bindings, rebindings) -} -func (p *Pool) AddVertexCtx(ctx context.Context, label string, i interface{}, bindings, rebindings map[string]string) (resp graphson.Vertex, err error) { - var pc *conn - if pc, err = p.conn(); err != nil { - return resp, errors.Wrap(err, "Failed p.conn") - } - defer p.putConn(pc, err) - return pc.Client.AddVertexCtx(ctx, label, i, bindings, rebindings) -} - -// Get -func (p *Pool) Get(query string, bindings, rebindings map[string]string) (resp []graphson.Vertex, err error) { - var pc *conn - if pc, err = p.conn(); err != nil { - return resp, errors.Wrap(err, "Failed p.conn") - } - defer p.putConn(pc, err) - return pc.Client.Get(query, bindings, rebindings) -} - -// GetCtx -func (p *Pool) GetCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (resp []graphson.Vertex, err error) { - var pc *conn - if pc, err = p.connCtx(ctx); err != nil { - return resp, errors.Wrap(err, "GetCtx: Failed p.connCtx") - } - defer p.putConn(pc, err) - return pc.Client.GetCtx(ctx, query, bindings, rebindings) -} - -// OpenStreamCursor initiates a query on the database, returning a stream to iterate over the results -func (p *Pool) OpenStreamCursor(ctx context.Context, query string, bindings, rebindings map[string]string) (stream *Stream, err error) { - var pc *conn - if pc, err = p.connCtx(ctx); 
err != nil { - err = errors.Wrap(err, "OpenStreamCursor: Failed p.connCtx") - return - } - defer p.putConn(pc, err) - return pc.Client.OpenStreamCursor(ctx, query, bindings, rebindings) -} - -// OpenCursorCtx initiates a query on the database, returning a cursor to iterate over the results -func (p *Pool) OpenCursorCtx(ctx context.Context, query string, bindings, rebindings map[string]string) (cursor *Cursor, err error) { - var pc *conn - if pc, err = p.connCtx(ctx); err != nil { - err = errors.Wrap(err, "GetCursorCtx: Failed p.connCtx") - return - } - defer p.putConn(pc, err) - return pc.Client.OpenCursorCtx(ctx, query, bindings, rebindings) -} - -// ReadCursorCtx returns the next set of results for the cursor -// - `res` returns vertices (and may be empty when results were read by a previous call - this is normal) -// - `eof` will be true when no more results are available (`res` may still have results) -func (p *Pool) ReadCursorCtx(ctx context.Context, cursor *Cursor) (res []graphson.Vertex, eof bool, err error) { - var pc *conn - if pc, err = p.connCtx(ctx); err != nil { - err = errors.Wrap(err, "NextCtx: Failed p.connCtx") - return - } - defer p.putConn(pc, err) - return pc.Client.ReadCursorCtx(ctx, cursor) -} - -// AddE -func (p *Pool) AddE(label, fromId, toId string, props map[string]interface{}) (resp interface{}, err error) { - return p.AddEdgeCtx(context.Background(), label, fromId, toId, props) -} - -func (p *Pool) AddEdgeCtx(ctx context.Context, label, fromId, toId string, props map[string]interface{}) (resp interface{}, err error) { - // AddEdgeCtx - var pc *conn - if pc, err = p.conn(); err != nil { - return resp, errors.Wrap(err, "Failed p.conn") - } - defer p.putConn(pc, err) - return pc.Client.AddEdgeCtx(ctx, label, fromId, toId, props) -} - -// GetE -func (p *Pool) GetE(q string, bindings, rebindings map[string]string) (resp interface{}, err error) { - return p.GetEdgeCtx(context.Background(), q, bindings, rebindings) -} - -func (p *Pool) 
GetEdgeCtx(ctx context.Context, q string, bindings, rebindings map[string]string) (resp interface{}, err error) { - var pc *conn - if pc, err = p.conn(); err != nil { - return resp, errors.Wrap(err, "Failed p.conn") - } - defer p.putConn(pc, err) - return pc.Client.GetEdgeCtx(ctx, q, bindings, rebindings) -} - -func (p *Pool) GetCount(q string, bindings, rebindings map[string]string) (i int64, err error) { - return p.GetCountCtx(context.Background(), q, bindings, rebindings) -} -func (p *Pool) GetCountCtx(ctx context.Context, q string, bindings, rebindings map[string]string) (i int64, err error) { - var pc *conn - if pc, err = p.conn(); err != nil { - return 0, errors.Wrap(err, "Failed p.conn") - } - defer p.putConn(pc, err) - return pc.Client.GetCountCtx(ctx, q, bindings, rebindings) -} - -func (p *Pool) GetStringList(q string, bindings, rebindings map[string]string) (vals []string, err error) { - return p.GetStringListCtx(context.Background(), q, bindings, rebindings) -} -func (p *Pool) GetStringListCtx(ctx context.Context, q string, bindings, rebindings map[string]string) (vals []string, err error) { - var pc *conn - if pc, err = p.conn(); err != nil { - err = errors.Wrap(err, "GetStringListCtx: Failed p.conn") - return - } - defer p.putConn(pc, err) - return pc.Client.GetStringListCtx(ctx, q, bindings, rebindings) -} - -// GetProperties returns a map of vertex properties -func (p *Pool) GetProperties(q string, bindings, rebindings map[string]string) (vals map[string][]interface{}, err error) { - return p.GetPropertiesCtx(context.Background(), q, bindings, rebindings) -} -func (p *Pool) GetPropertiesCtx(ctx context.Context, q string, bindings, rebindings map[string]string) (vals map[string][]interface{}, err error) { - var pc *conn - if pc, err = p.conn(); err != nil { - err = errors.Wrap(err, "GetPropertiesCtx: Failed p.conn") - return - } - defer p.putConn(pc, err) - return pc.Client.GetPropertiesCtx(ctx, q, bindings, rebindings) -} - -// Close closes the 
pool. -func (p *Pool) Close() { - p.mu.Lock() - - close(p.openerCh) - if p.cleanerCh != nil { - close(p.cleanerCh) - } - for _, cr := range p.connRequests { - close(cr) - } - p.closed = true - p.mu.Unlock() - for _, pc := range p.freeConns { - if pc.Client != nil { - pc.Client.Close() - } - } - p.mu.Lock() - p.freeConns = nil - p.mu.Unlock() -} - -func (cn *conn) expired(timeout time.Duration) bool { - if timeout <= 0 { - return false - } - return cn.t.Add(timeout).Before(time.Now()) -} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/request.go b/vendor/github.com/ONSdigital/gremgo-neptune/request.go deleted file mode 100644 index 4718bb2c..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/request.go +++ /dev/null @@ -1,96 +0,0 @@ -package gremgo - -import ( - "context" - "encoding/base64" - "encoding/json" - - "github.com/gofrs/uuid" -) - -const mimeTypeStr = "application/vnd.gremlin-v3.0+json" - -// create the header as []byte with the length byte as prefix -var mimeTypePrefix = append([]byte{byte(len(mimeTypeStr))}, []byte(mimeTypeStr)...) - -type requester interface { - prepare() error - getID() string - getRequest() request -} - -// request is a container for all evaluation request parameters to be sent to the Gremlin Server. 
-type request struct { - RequestID string `json:"requestId"` - Op string `json:"op"` - Processor string `json:"processor"` - Args map[string]interface{} `json:"args"` -} - -// prepareRequest packages a query and binding into the format that Gremlin Server accepts -func prepareRequest(query string, bindings, rebindings map[string]string) (req request, id string, err error) { - var uuID uuid.UUID - if uuID, err = uuid.NewV4(); err != nil { - return - } - id = uuID.String() - - req.RequestID = id - req.Op = "eval" - req.Processor = "" - - req.Args = make(map[string]interface{}) - req.Args["language"] = "gremlin-groovy" - req.Args["gremlin"] = query - if len(bindings) > 0 || len(rebindings) > 0 { - req.Args["bindings"] = bindings - req.Args["rebindings"] = rebindings - } - - return -} - -//prepareAuthRequest creates a ws request for Gremlin Server -func prepareAuthRequest(requestID string, username string, password string) (req request, err error) { - req.RequestID = requestID - req.Op = "authentication" - req.Processor = "trasversal" - - var simpleAuth []byte - user := []byte(username) - pass := []byte(password) - - simpleAuth = append(simpleAuth, 0) - simpleAuth = append(simpleAuth, user...) - simpleAuth = append(simpleAuth, 0) - simpleAuth = append(simpleAuth, pass...) 
- - req.Args = make(map[string]interface{}) - req.Args["sasl"] = base64.StdEncoding.EncodeToString(simpleAuth) - - return -} - -// packageRequest takes a request type and formats it into being able to be delivered to Gremlin Server -func packageRequest(req request) (msg []byte, err error) { - j, err := json.Marshal(req) // Formats request into byte format - if err != nil { - return - } - return append(mimeTypePrefix, j...), nil -} - -// dispatchRequest sends the request for writing to the remote Gremlin Server -func (c *Client) dispatchRequest(msg []byte) { - c.requests <- msg -} - -// dispatchRequestCtx sends the request for writing to the remote Gremlin Server -func (c *Client) dispatchRequestCtx(ctx context.Context, msg []byte) (err error) { - select { - case c.requests <- msg: - case <-ctx.Done(): - err = ctx.Err() - } - return -} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/response.go b/vendor/github.com/ONSdigital/gremgo-neptune/response.go deleted file mode 100644 index 620a1c82..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/response.go +++ /dev/null @@ -1,231 +0,0 @@ -package gremgo - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/pkg/errors" -) - -const ( - StatusSuccess = 200 - StatusNoContent = 204 - StatusPartialContent = 206 - StatusUnauthorized = 401 - StatusAuthenticate = 407 - StatusMalformedRequest = 498 - StatusInvalidRequestArguments = 499 - StatusServerError = 500 - StatusScriptEvaluationError = 597 - StatusServerTimeout = 598 - StatusServerSerializationError = 599 -) - -// Status struct is used to hold properties returned from requests to the gremlin server -type Status struct { - Message string `json:"message"` - Code int `json:"code"` - Attributes map[string]interface{} `json:"attributes"` -} - -// Result struct is used to hold properties returned for results from requests to the gremlin server -type Result struct { - // Query Response Data - Data json.RawMessage 
`json:"data"` - Meta map[string]interface{} `json:"meta"` -} - -// Response structs holds the entire response from requests to the gremlin server -type Response struct { - RequestID string `json:"requestId"` - Status Status `json:"status"` - Result Result `json:"result"` -} - -// ToString returns a string representation of the Response struct -func (r Response) ToString() string { - return fmt.Sprintf("Response \nRequestID: %v, \nStatus: {%#v}, \nResult: {%#v}\n", r.RequestID, r.Status, r.Result) -} - -func (c *Client) saveWorkerCtx(ctx context.Context, msgChan chan []byte, errs chan error) { - for { - select { - case msg := <-msgChan: - if err := c.handleResponse(msg); err != nil { - errs <- errors.Wrapf(err, "saveWorkerCtx: handleResponse error") - } - case <-ctx.Done(): - return - } - } -} - -func (c *Client) handleResponse(msg []byte) (err error) { - var resp Response - resp, err = marshalResponse(msg) - if resp.Status.Code == StatusAuthenticate { //Server request authentication - return c.authenticate(resp.RequestID) - } - c.saveResponse(resp, err) - return -} - -// marshalResponse creates a response struct for every incoming response for further manipulation -func marshalResponse(msg []byte) (resp Response, err error) { - err = json.Unmarshal(msg, &resp) - if err != nil { - return - } - - err = resp.detectError() - return -} - -// saveResponse makes the response (and its err) available for retrieval by the requester. -// Mutexes are used for thread safety. 
-func (c *Client) saveResponse(resp Response, err error) { - c.Lock() - defer c.Unlock() - var newdata []interface{} - existingData, ok := c.results.Load(resp.RequestID) // Retrieve old data container (for requests with multiple responses) - if ok { - newdata = append(existingData.([]interface{}), resp) // Create new data container with new data - existingData = nil - } else { - newdata = append(newdata, resp) - } - c.results.Store(resp.RequestID, newdata) // Add new data to buffer for future retrieval - respNotifier, _ := c.responseNotifier.LoadOrStore(resp.RequestID, make(chan error, 1)) - // err is from marshalResponse (json.Unmarshal), but is ignored when Code==statusPartialContent - if resp.Status.Code == StatusPartialContent { - if chunkNotifier, ok := c.chunkNotifier.Load(resp.RequestID); ok { - chunkNotifier.(chan bool) <- true - } - } else { - respNotifier.(chan error) <- err - } -} - -// retrieveResponse retrieves the response saved by saveResponse. -func (c *Client) retrieveResponse(id string) (data []Response, err error) { - resp, _ := c.responseNotifier.Load(id) - if err = <-resp.(chan error); err == nil { - data = c.getCurrentResults(id) - } - c.cleanResults(id, resp.(chan error), nil) - return -} - -func (c *Client) getCurrentResults(id string) (data []Response) { - dataI, ok := c.results.Load(id) - if !ok { - return - } - d := dataI.([]interface{}) - dataI = nil - data = make([]Response, len(d)) - if len(d) == 0 { - return - } - for i := range d { - data[i] = d[i].(Response) - } - return -} - -func (c *Client) cleanResults(id string, respNotifier chan error, chunkNotifier chan bool) { - if respNotifier == nil { - return - } - c.responseNotifier.Delete(id) - close(respNotifier) - if chunkNotifier != nil { - close(chunkNotifier) - c.chunkNotifier.Delete(id) - } - c.deleteResponse(id) -} - -// retrieveResponseCtx retrieves the response saved by saveResponse. 
-func (c *Client) retrieveResponseCtx(ctx context.Context, id string) (data []Response, err error) { - respNotifier, _ := c.responseNotifier.Load(id) - select { - case err = <-respNotifier.(chan error): - defer c.cleanResults(id, respNotifier.(chan error), nil) - if err != nil { - return - } - data = c.getCurrentResults(id) - case <-ctx.Done(): - err = ctx.Err() - } - return -} - -// retrieveNextResponseCtx retrieves the current response (may be empty!) saved by saveResponse, -// `done` is true when the results are complete (eof) -func (c *Client) retrieveNextResponseCtx(ctx context.Context, cursor *Cursor) (data []Response, done bool, err error) { - c.Lock() - respNotifier, ok := c.responseNotifier.Load(cursor.ID) - c.Unlock() - if respNotifier == nil || !ok { - return - } - - var chunkNotifier chan bool - if chunkNotifierInterface, ok := c.chunkNotifier.Load(cursor.ID); ok { - chunkNotifier = chunkNotifierInterface.(chan bool) - } - - select { - case err = <-respNotifier.(chan error): - defer c.cleanResults(cursor.ID, respNotifier.(chan error), chunkNotifier) - if err != nil { - return - } - data = c.getCurrentResults(cursor.ID) - done = true - case <-chunkNotifier: - c.Lock() - data = c.getCurrentResults(cursor.ID) - c.deleteResponse(cursor.ID) - c.Unlock() - case <-ctx.Done(): - err = ctx.Err() - } - - return -} - -// deleteResponse deletes the response from the container. Used for cleanup purposes by requester. 
-func (c *Client) deleteResponse(id string) { - c.results.Delete(id) - return -} - -// detectError detects any possible errors in responses from Gremlin Server and generates an error for each code -func (r *Response) detectError() (err error) { - switch r.Status.Code { - case StatusSuccess, StatusNoContent, StatusPartialContent: - case StatusUnauthorized: - err = fmt.Errorf("UNAUTHORIZED - Response Message: %s", r.Status.Message) - case StatusAuthenticate: - err = fmt.Errorf("AUTHENTICATE - Response Message: %s", r.Status.Message) - case StatusMalformedRequest: - err = fmt.Errorf("MALFORMED REQUEST - Response Message: %s", r.Status.Message) - case StatusInvalidRequestArguments: - err = fmt.Errorf("INVALID REQUEST ARGUMENTS - Response Message: %s", r.Status.Message) - case StatusServerError: - err = fmt.Errorf("SERVER ERROR - Response Message: %s", r.Status.Message) - case StatusScriptEvaluationError: - err = fmt.Errorf("SCRIPT EVALUATION ERROR - Response Message: %s", r.Status.Message) - case StatusServerTimeout: - err = fmt.Errorf("SERVER TIMEOUT - Response Message: %s", r.Status.Message) - case StatusServerSerializationError: - err = fmt.Errorf("SERVER SERIALIZATION ERROR - Response Message: %s", r.Status.Message) - default: - err = fmt.Errorf("UNKNOWN ERROR - Response Message: %s", r.Status.Message) - } - return -} diff --git a/vendor/github.com/ONSdigital/gremgo-neptune/tags.go b/vendor/github.com/ONSdigital/gremgo-neptune/tags.go deleted file mode 100644 index 8339d39a..00000000 --- a/vendor/github.com/ONSdigital/gremgo-neptune/tags.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gremgo - -import "strings" - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. 
-type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/gofrs/uuid/LICENSE b/vendor/github.com/gofrs/uuid/LICENSE deleted file mode 100644 index 926d5498..00000000 --- a/vendor/github.com/gofrs/uuid/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013-2018 by Maxim Bublis - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md deleted file mode 100644 index efc3204f..00000000 --- a/vendor/github.com/gofrs/uuid/README.md +++ /dev/null @@ -1,109 +0,0 @@ -# UUID - -[![License](https://img.shields.io/github/license/gofrs/uuid.svg)](https://github.com/gofrs/uuid/blob/master/LICENSE) -[![Build Status](https://travis-ci.org/gofrs/uuid.svg?branch=master)](https://travis-ci.org/gofrs/uuid) -[![GoDoc](http://godoc.org/github.com/gofrs/uuid?status.svg)](http://godoc.org/github.com/gofrs/uuid) -[![Coverage Status](https://codecov.io/gh/gofrs/uuid/branch/master/graphs/badge.svg?branch=master)](https://codecov.io/gh/gofrs/uuid/) -[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/uuid)](https://goreportcard.com/report/github.com/gofrs/uuid) - -Package uuid provides a pure Go implementation of Universally Unique Identifiers -(UUID) variant as defined in RFC-4122. This package supports both the creation -and parsing of UUIDs in different formats. 
- -This package supports the following UUID versions: -* Version 1, based on timestamp and MAC address (RFC-4122) -* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) -* Version 3, based on MD5 hashing of a named value (RFC-4122) -* Version 4, based on random numbers (RFC-4122) -* Version 5, based on SHA-1 hashing of a named value (RFC-4122) - -## Project History - -This project was originally forked from the -[github.com/satori/go.uuid](https://github.com/satori/go.uuid) repository after -it appeared to be no longer maintained, while exhibiting [critical -flaws](https://github.com/satori/go.uuid/issues/73). We have decided to take -over this project to ensure it receives regular maintenance for the benefit of -the larger Go community. - -We'd like to thank Maxim Bublis for his hard work on the original iteration of -the package. - -## License - -This source code of this package is released under the MIT License. Please see -the [LICENSE](https://github.com/gofrs/uuid/blob/master/LICENSE) for the full -content of the license. - -## Recommended Package Version - -We recommend using v2.0.0+ of this package, as versions prior to 2.0.0 were -created before our fork of the original package and have some known -deficiencies. - -## Installation - -It is recommended to use a package manager like `dep` that understands tagged -releases of a package, as well as semantic versioning. - -If you are unable to make use of a dependency manager with your project, you can -use the `go get` command to download it directly: - -```Shell -$ go get github.com/gofrs/uuid -``` - -## Requirements - -Due to subtests not being supported in older versions of Go, this package is -only regularly tested against Go 1.7+. This package may work perfectly fine with -Go 1.2+, but support for these older versions is not actively maintained. 
- -## Go 1.11 Modules - -As of v3.2.0, this repository no longer adopts Go modules, and v3.2.0 no longer has a `go.mod` file. As a result, v3.2.0 also drops support for the `github.com/gofrs/uuid/v3` import path. Only module-based consumers are impacted. With the v3.2.0 release, _all_ gofrs/uuid consumers should use the `github.com/gofrs/uuid` import path. - -An existing module-based consumer will continue to be able to build using the `github.com/gofrs/uuid/v3` import path using any valid consumer `go.mod` that worked prior to the publishing of v3.2.0, but any module-based consumer should start using the `github.com/gofrs/uuid` import path when possible and _must_ use the `github.com/gofrs/uuid` import path prior to upgrading to v3.2.0. - -Please refer to [Issue #61](https://github.com/gofrs/uuid/issues/61) and [Issue #66](https://github.com/gofrs/uuid/issues/66) for more details. - -## Usage - -Here is a quick overview of how to use this package. For more detailed -documentation, please see the [GoDoc Page](http://godoc.org/github.com/gofrs/uuid). - -```go -package main - -import ( - "log" - - "github.com/gofrs/uuid" -) - -// Create a Version 4 UUID, panicking on error. -// Use this form to initialize package-level variables. -var u1 = uuid.Must(uuid.NewV4()) - -func main() { - // Create a Version 4 UUID. - u2, err := uuid.NewV4() - if err != nil { - log.Fatalf("failed to generate UUID: %v", err) - } - log.Printf("generated Version 4 UUID %v", u2) - - // Parse a UUID from a string. 
- s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - u3, err := uuid.FromString(s) - if err != nil { - log.Fatalf("failed to parse UUID %q: %v", s, err) - } - log.Printf("successfully parsed UUID %v", u3) -} -``` - -## References - -* [RFC-4122](https://tools.ietf.org/html/rfc4122) -* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) diff --git a/vendor/github.com/gofrs/uuid/codec.go b/vendor/github.com/gofrs/uuid/codec.go deleted file mode 100644 index e3014c68..00000000 --- a/vendor/github.com/gofrs/uuid/codec.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "bytes" - "encoding/hex" - "fmt" -) - -// FromBytes returns a UUID generated from the raw byte slice input. -// It will return an error if the slice isn't 16 bytes long. 
-func FromBytes(input []byte) (UUID, error) { - u := UUID{} - err := u.UnmarshalBinary(input) - return u, err -} - -// FromBytesOrNil returns a UUID generated from the raw byte slice input. -// Same behavior as FromBytes(), but returns uuid.Nil instead of an error. -func FromBytesOrNil(input []byte) UUID { - uuid, err := FromBytes(input) - if err != nil { - return Nil - } - return uuid -} - -// FromString returns a UUID parsed from the input string. -// Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (UUID, error) { - u := UUID{} - err := u.UnmarshalText([]byte(input)) - return u, err -} - -// FromStringOrNil returns a UUID parsed from the input string. -// Same behavior as FromString(), but returns uuid.Nil instead of an error. -func FromStringOrNil(input string) UUID { - uuid, err := FromString(input) - if err != nil { - return Nil - } - return uuid -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The encoding is the same as returned by the String() method. -func (u UUID) MarshalText() ([]byte, error) { - return []byte(u.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
-// Following formats are supported: -// -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "6ba7b8109dad11d180b400c04fd430c8" -// "{6ba7b8109dad11d180b400c04fd430c8}", -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" -// -// ABNF for supported UUID text representation follows: -// -// URN := 'urn' -// UUID-NID := 'uuid' -// -// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | -// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | -// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' -// -// hexoct := hexdig hexdig -// 2hexoct := hexoct hexoct -// 4hexoct := 2hexoct 2hexoct -// 6hexoct := 4hexoct 2hexoct -// 12hexoct := 6hexoct 6hexoct -// -// hashlike := 12hexoct -// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct -// -// plain := canonical | hashlike -// uuid := canonical | hashlike | braced | urn -// -// braced := '{' plain '}' | '{' hashlike '}' -// urn := URN ':' UUID-NID ':' plain -// -func (u *UUID) UnmarshalText(text []byte) error { - switch len(text) { - case 32: - return u.decodeHashLike(text) - case 34, 38: - return u.decodeBraced(text) - case 36: - return u.decodeCanonical(text) - case 41, 45: - return u.decodeURN(text) - default: - return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(text), text) - } -} - -// decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3): -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". 
-func (u *UUID) decodeCanonical(t []byte) error { - if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { - return fmt.Errorf("uuid: incorrect UUID format in string %q", t) - } - - src := t - dst := u[:] - - for i, byteGroup := range byteGroups { - if i > 0 { - src = src[1:] // skip dash - } - _, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup]) - if err != nil { - return err - } - src = src[byteGroup:] - dst = dst[byteGroup/2:] - } - - return nil -} - -// decodeHashLike decodes UUID strings that are using the following format: -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeHashLike(t []byte) error { - src := t[:] - dst := u[:] - - _, err := hex.Decode(dst, src) - return err -} - -// decodeBraced decodes UUID strings that are using the following formats: -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" -// "{6ba7b8109dad11d180b400c04fd430c8}". -func (u *UUID) decodeBraced(t []byte) error { - l := len(t) - - if t[0] != '{' || t[l-1] != '}' { - return fmt.Errorf("uuid: incorrect UUID format in string %q", t) - } - - return u.decodePlain(t[1 : l-1]) -} - -// decodeURN decodes UUID strings that are using the following formats: -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeURN(t []byte) error { - total := len(t) - - urnUUIDPrefix := t[:9] - - if !bytes.Equal(urnUUIDPrefix, urnPrefix) { - return fmt.Errorf("uuid: incorrect UUID format in string %q", t) - } - - return u.decodePlain(t[9:total]) -} - -// decodePlain decodes UUID strings that are using the following formats: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format -// "6ba7b8109dad11d180b400c04fd430c8". 
-func (u *UUID) decodePlain(t []byte) error { - switch len(t) { - case 32: - return u.decodeHashLike(t) - case 36: - return u.decodeCanonical(t) - default: - return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(t), t) - } -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (u UUID) MarshalBinary() ([]byte, error) { - return u.Bytes(), nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It will return an error if the slice isn't 16 bytes long. -func (u *UUID) UnmarshalBinary(data []byte) error { - if len(data) != Size { - return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) - } - copy(u[:], data) - - return nil -} diff --git a/vendor/github.com/gofrs/uuid/fuzz.go b/vendor/github.com/gofrs/uuid/fuzz.go deleted file mode 100644 index afaefbc8..00000000 --- a/vendor/github.com/gofrs/uuid/fuzz.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2018 Andrei Tudor Călin -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// +build gofuzz - -package uuid - -// Fuzz implements a simple fuzz test for FromString / UnmarshalText. -// -// To run: -// -// $ go get github.com/dvyukov/go-fuzz/... -// $ cd $GOPATH/src/github.com/gofrs/uuid -// $ go-fuzz-build github.com/gofrs/uuid -// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata -// -// If you make significant changes to FromString / UnmarshalText and add -// new cases to fromStringTests (in codec_test.go), please run -// -// $ go test -seed_fuzz_corpus -// -// to seed the corpus with the new interesting inputs, then run the fuzzer. -func Fuzz(data []byte) int { - _, err := FromString(string(data)) - if err != nil { - return 0 - } - return 1 -} diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go deleted file mode 100644 index 4257761f..00000000 --- a/vendor/github.com/gofrs/uuid/generator.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "encoding/binary" - "fmt" - "hash" - "io" - "net" - "os" - "sync" - "time" -) - -// Difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). -const epochStart = 122192928000000000 - -type epochFunc func() time.Time - -// HWAddrFunc is the function type used to provide hardware (MAC) addresses. -type HWAddrFunc func() (net.HardwareAddr, error) - -// DefaultGenerator is the default UUID Generator used by this package. -var DefaultGenerator Generator = NewGen() - -var ( - posixUID = uint32(os.Getuid()) - posixGID = uint32(os.Getgid()) -) - -// NewV1 returns a UUID based on the current timestamp and MAC address. -func NewV1() (UUID, error) { - return DefaultGenerator.NewV1() -} - -// NewV2 returns a DCE Security UUID based on the POSIX UID/GID. -func NewV2(domain byte) (UUID, error) { - return DefaultGenerator.NewV2(domain) -} - -// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. -func NewV3(ns UUID, name string) UUID { - return DefaultGenerator.NewV3(ns, name) -} - -// NewV4 returns a randomly generated UUID. -func NewV4() (UUID, error) { - return DefaultGenerator.NewV4() -} - -// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. -func NewV5(ns UUID, name string) UUID { - return DefaultGenerator.NewV5(ns, name) -} - -// Generator provides an interface for generating UUIDs. 
-type Generator interface { - NewV1() (UUID, error) - NewV2(domain byte) (UUID, error) - NewV3(ns UUID, name string) UUID - NewV4() (UUID, error) - NewV5(ns UUID, name string) UUID -} - -// Gen is a reference UUID generator based on the specifications laid out in -// RFC-4122 and DCE 1.1: Authentication and Security Services. This type -// satisfies the Generator interface as defined in this package. -// -// For consumers who are generating V1 UUIDs, but don't want to expose the MAC -// address of the node generating the UUIDs, the NewGenWithHWAF() function has been -// provided as a convenience. See the function's documentation for more info. -// -// The authors of this package do not feel that the majority of users will need -// to obfuscate their MAC address, and so we recommend using NewGen() to create -// a new generator. -type Gen struct { - clockSequenceOnce sync.Once - hardwareAddrOnce sync.Once - storageMutex sync.Mutex - - rand io.Reader - - epochFunc epochFunc - hwAddrFunc HWAddrFunc - lastTime uint64 - clockSequence uint16 - hardwareAddr [6]byte -} - -// interface check -- build will fail if *Gen doesn't satisfy Generator -var _ Generator = (*Gen)(nil) - -// NewGen returns a new instance of Gen with some default values set. Most -// people should use this. -func NewGen() *Gen { - return NewGenWithHWAF(defaultHWAddrFunc) -} - -// NewGenWithHWAF builds a new UUID generator with the HWAddrFunc provided. Most -// consumers should use NewGen() instead. -// -// This is used so that consumers can generate their own MAC addresses, for use -// in the generated UUIDs, if there is some concern about exposing the physical -// address of the machine generating the UUID. -// -// The Gen generator will only invoke the HWAddrFunc once, and cache that MAC -// address for all the future UUIDs generated by it. If you'd like to switch the -// MAC address being used, you'll need to create a new generator using this -// function. 
-func NewGenWithHWAF(hwaf HWAddrFunc) *Gen { - return &Gen{ - epochFunc: time.Now, - hwAddrFunc: hwaf, - rand: rand.Reader, - } -} - -// NewV1 returns a UUID based on the current timestamp and MAC address. -func (g *Gen) NewV1() (UUID, error) { - u := UUID{} - - timeNow, clockSeq, err := g.getClockSequence() - if err != nil { - return Nil, err - } - binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - - hardwareAddr, err := g.getHardwareAddr() - if err != nil { - return Nil, err - } - copy(u[10:], hardwareAddr) - - u.SetVersion(V1) - u.SetVariant(VariantRFC4122) - - return u, nil -} - -// NewV2 returns a DCE Security UUID based on the POSIX UID/GID. -func (g *Gen) NewV2(domain byte) (UUID, error) { - u, err := g.NewV1() - if err != nil { - return Nil, err - } - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[:], posixGID) - } - - u[9] = domain - - u.SetVersion(V2) - u.SetVariant(VariantRFC4122) - - return u, nil -} - -// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. -func (g *Gen) NewV3(ns UUID, name string) UUID { - u := newFromHash(md5.New(), ns, name) - u.SetVersion(V3) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV4 returns a randomly generated UUID. -func (g *Gen) NewV4() (UUID, error) { - u := UUID{} - if _, err := io.ReadFull(g.rand, u[:]); err != nil { - return Nil, err - } - u.SetVersion(V4) - u.SetVariant(VariantRFC4122) - - return u, nil -} - -// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. -func (g *Gen) NewV5(ns UUID, name string) UUID { - u := newFromHash(sha1.New(), ns, name) - u.SetVersion(V5) - u.SetVariant(VariantRFC4122) - - return u -} - -// Returns the epoch and clock sequence. 
-func (g *Gen) getClockSequence() (uint64, uint16, error) { - var err error - g.clockSequenceOnce.Do(func() { - buf := make([]byte, 2) - if _, err = io.ReadFull(g.rand, buf); err != nil { - return - } - g.clockSequence = binary.BigEndian.Uint16(buf) - }) - if err != nil { - return 0, 0, err - } - - g.storageMutex.Lock() - defer g.storageMutex.Unlock() - - timeNow := g.getEpoch() - // Clock didn't change since last UUID generation. - // Should increase clock sequence. - if timeNow <= g.lastTime { - g.clockSequence++ - } - g.lastTime = timeNow - - return timeNow, g.clockSequence, nil -} - -// Returns the hardware address. -func (g *Gen) getHardwareAddr() ([]byte, error) { - var err error - g.hardwareAddrOnce.Do(func() { - var hwAddr net.HardwareAddr - if hwAddr, err = g.hwAddrFunc(); err == nil { - copy(g.hardwareAddr[:], hwAddr) - return - } - - // Initialize hardwareAddr randomly in case - // of real network interfaces absence. - if _, err = io.ReadFull(g.rand, g.hardwareAddr[:]); err != nil { - return - } - // Set multicast bit as recommended by RFC-4122 - g.hardwareAddr[0] |= 0x01 - }) - if err != nil { - return []byte{}, err - } - return g.hardwareAddr[:], nil -} - -// Returns the difference between UUID epoch (October 15, 1582) -// and current time in 100-nanosecond intervals. -func (g *Gen) getEpoch() uint64 { - return epochStart + uint64(g.epochFunc().UnixNano()/100) -} - -// Returns the UUID based on the hashing of the namespace UUID and name. -func newFromHash(h hash.Hash, ns UUID, name string) UUID { - u := UUID{} - h.Write(ns[:]) - h.Write([]byte(name)) - copy(u[:], h.Sum(nil)) - - return u -} - -// Returns the hardware address. 
-func defaultHWAddrFunc() (net.HardwareAddr, error) { - ifaces, err := net.Interfaces() - if err != nil { - return []byte{}, err - } - for _, iface := range ifaces { - if len(iface.HardwareAddr) >= 6 { - return iface.HardwareAddr, nil - } - } - return []byte{}, fmt.Errorf("uuid: no HW address found") -} diff --git a/vendor/github.com/gofrs/uuid/sql.go b/vendor/github.com/gofrs/uuid/sql.go deleted file mode 100644 index 6f254a4f..00000000 --- a/vendor/github.com/gofrs/uuid/sql.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "bytes" - "database/sql/driver" - "encoding/json" - "fmt" -) - -// Value implements the driver.Valuer interface. -func (u UUID) Value() (driver.Value, error) { - return u.String(), nil -} - -// Scan implements the sql.Scanner interface. 
-// A 16-byte slice will be handled by UnmarshalBinary, while -// a longer byte slice or a string will be handled by UnmarshalText. -func (u *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case UUID: // support gorm convert from UUID to NullUUID - *u = src - return nil - - case []byte: - if len(src) == Size { - return u.UnmarshalBinary(src) - } - return u.UnmarshalText(src) - - case string: - return u.UnmarshalText([]byte(src)) - } - - return fmt.Errorf("uuid: cannot convert %T to UUID", src) -} - -// NullUUID can be used with the standard sql package to represent a -// UUID value that can be NULL in the database. -type NullUUID struct { - UUID UUID - Valid bool -} - -// Value implements the driver.Valuer interface. -func (u NullUUID) Value() (driver.Value, error) { - if !u.Valid { - return nil, nil - } - // Delegate to UUID Value function - return u.UUID.Value() -} - -// Scan implements the sql.Scanner interface. -func (u *NullUUID) Scan(src interface{}) error { - if src == nil { - u.UUID, u.Valid = Nil, false - return nil - } - - // Delegate to UUID Scan function - u.Valid = true - return u.UUID.Scan(src) -} - -// MarshalJSON marshals the NullUUID as null or the nested UUID -func (u NullUUID) MarshalJSON() ([]byte, error) { - if !u.Valid { - return json.Marshal(nil) - } - - return json.Marshal(u.UUID) -} - -// UnmarshalJSON unmarshals a NullUUID -func (u *NullUUID) UnmarshalJSON(b []byte) error { - if bytes.Equal(b, []byte("null")) { - u.UUID, u.Valid = Nil, false - return nil - } - - if err := json.Unmarshal(b, &u.UUID); err != nil { - return err - } - - u.Valid = true - - return nil -} diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go deleted file mode 100644 index 9c4547f1..00000000 --- a/vendor/github.com/gofrs/uuid/uuid.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining 
-// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package uuid provides implementations of the Universally Unique Identifier (UUID), as specified in RFC-4122 and DCE 1.1. -// -// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. -// -// DCE 1.1[2] provides the specification for version 2. -// -// [1] https://tools.ietf.org/html/rfc4122 -// [2] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 -package uuid - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - "io" - "strings" - "time" -) - -// Size of a UUID in bytes. -const Size = 16 - -// UUID is an array type to represent the value of a UUID, as defined in RFC-4122. -type UUID [Size]byte - -// UUID versions. -const ( - _ byte = iota - V1 // Version 1 (date-time and MAC address) - V2 // Version 2 (date-time and MAC address, DCE security version) - V3 // Version 3 (namespace name-based) - V4 // Version 4 (random) - V5 // Version 5 (namespace name-based) -) - -// UUID layout variants. 
-const ( - VariantNCS byte = iota - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// UUID DCE domains. -const ( - DomainPerson = iota - DomainGroup - DomainOrg -) - -// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00, -// 15 October 1582 within a V1 UUID. This type has no meaning for V2-V5 -// UUIDs since they don't have an embedded timestamp. -type Timestamp uint64 - -const _100nsPerSecond = 10000000 - -// Time returns the UTC time.Time representation of a Timestamp -func (t Timestamp) Time() (time.Time, error) { - secs := uint64(t) / _100nsPerSecond - nsecs := 100 * (uint64(t) % _100nsPerSecond) - return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil -} - -// TimestampFromV1 returns the Timestamp embedded within a V1 UUID. -// Returns an error if the UUID is any version other than 1. -func TimestampFromV1(u UUID) (Timestamp, error) { - if u.Version() != 1 { - err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version()) - return 0, err - } - low := binary.BigEndian.Uint32(u[0:4]) - mid := binary.BigEndian.Uint16(u[4:6]) - hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff - return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil -} - -// String parse helpers. -var ( - urnPrefix = []byte("urn:uuid:") - byteGroups = []int{8, 4, 4, 4, 12} -) - -// Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to -// zero. -var Nil = UUID{} - -// Predefined namespace UUIDs. -var ( - NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) -) - -// Version returns the algorithm version used to generate the UUID. -func (u UUID) Version() byte { - return u[6] >> 4 -} - -// Variant returns the UUID layout variant. 
-func (u UUID) Variant() byte { - switch { - case (u[8] >> 7) == 0x00: - return VariantNCS - case (u[8] >> 6) == 0x02: - return VariantRFC4122 - case (u[8] >> 5) == 0x06: - return VariantMicrosoft - case (u[8] >> 5) == 0x07: - fallthrough - default: - return VariantFuture - } -} - -// Bytes returns a byte slice representation of the UUID. -func (u UUID) Bytes() []byte { - return u[:] -} - -// String returns a canonical RFC-4122 string representation of the UUID: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = '-' - hex.Encode(buf[9:13], u[4:6]) - buf[13] = '-' - hex.Encode(buf[14:18], u[6:8]) - buf[18] = '-' - hex.Encode(buf[19:23], u[8:10]) - buf[23] = '-' - hex.Encode(buf[24:], u[10:]) - - return string(buf) -} - -// Format implements fmt.Formatter for UUID values. -// -// The behavior is as follows: -// The 'x' and 'X' verbs output only the hex digits of the UUID, using a-f for 'x' and A-F for 'X'. -// The 'v', '+v', 's' and 'q' verbs return the canonical RFC-4122 string representation. -// The 'S' verb returns the RFC-4122 format, but with capital hex digits. -// The '#v' verb returns the "Go syntax" representation, which is a 16 byte array initializer. -// All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return -// "%!verb(uuid.UUID=value)" as recommended by the fmt package. 
-func (u UUID) Format(f fmt.State, c rune) { - switch c { - case 'x', 'X': - s := hex.EncodeToString(u.Bytes()) - if c == 'X' { - s = strings.Map(toCapitalHexDigits, s) - } - _, _ = io.WriteString(f, s) - case 'v': - var s string - if f.Flag('#') { - s = fmt.Sprintf("%#v", [Size]byte(u)) - } else { - s = u.String() - } - _, _ = io.WriteString(f, s) - case 's', 'S': - s := u.String() - if c == 'S' { - s = strings.Map(toCapitalHexDigits, s) - } - _, _ = io.WriteString(f, s) - case 'q': - _, _ = io.WriteString(f, `"`+u.String()+`"`) - default: - // invalid/unsupported format verb - fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String()) - } -} - -func toCapitalHexDigits(ch rune) rune { - // convert a-f hex digits to A-F - switch ch { - case 'a': - return 'A' - case 'b': - return 'B' - case 'c': - return 'C' - case 'd': - return 'D' - case 'e': - return 'E' - case 'f': - return 'F' - default: - return ch - } -} - -// SetVersion sets the version bits. -func (u *UUID) SetVersion(v byte) { - u[6] = (u[6] & 0x0f) | (v << 4) -} - -// SetVariant sets the variant bits. -func (u *UUID) SetVariant(v byte) { - switch v { - case VariantNCS: - u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) - case VariantRFC4122: - u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) - case VariantMicrosoft: - u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) - case VariantFuture: - fallthrough - default: - u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) - } -} - -// Must is a helper that wraps a call to a function returning (UUID, error) -// and panics if the error is non-nil. 
It is intended for use in variable -// initializations such as -// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) -func Must(u UUID, err error) UUID { - if err != nil { - panic(err) - } - return u -} diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS deleted file mode 100644 index 1931f400..00000000 --- a/vendor/github.com/gorilla/websocket/AUTHORS +++ /dev/null @@ -1,9 +0,0 @@ -# This is the official list of Gorilla WebSocket authors for copyright -# purposes. -# -# Please keep the list sorted. - -Gary Burd -Google LLC (https://opensource.google.com/) -Joachim Bauch - diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE deleted file mode 100644 index 9171c972..00000000 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md deleted file mode 100644 index f26fd466..00000000 --- a/vendor/github.com/gorilla/websocket/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# Gorilla WebSocket - -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. - -[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) -[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) - -### Documentation - -* [API Reference](http://godoc.org/github.com/gorilla/websocket) -* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) -* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) -* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) -* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) - -### Status - -The Gorilla WebSocket package provides a complete and tested implementation of -the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The -package API is stable. 
- -### Installation - - go get github.com/gorilla/websocket - -### Protocol Compliance - -The Gorilla WebSocket package passes the server tests in the [Autobahn Test -Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn -subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). - -### Gorilla WebSocket compared with other packages - - - - - - - - - - - - - - - - - - -
github.com/gorillagolang.org/x/net
RFC 6455 Features
Passes Autobahn Test SuiteYesNo
Receive fragmented messageYesNo, see note 1
Send close messageYesNo
Send pings and receive pongsYesNo
Get the type of a received data messageYesYes, see note 2
Other Features
Compression ExtensionsExperimentalNo
Read message using io.ReaderYesNo, see note 3
Write message using io.WriteCloserYesNo, see note 3
- -Notes: - -1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). -2. The application can get the type of a received data message by implementing - a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) - function. -3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. - Read returns when the input buffer is full or a frame boundary is - encountered. Each call to Write sends a single frame message. The Gorilla - io.Reader and io.WriteCloser operate on a single WebSocket message. - diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go deleted file mode 100644 index 962c06a3..00000000 --- a/vendor/github.com/gorilla/websocket/client.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "context" - "crypto/tls" - "errors" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httptrace" - "net/url" - "strings" - "time" -) - -// ErrBadHandshake is returned when the server response to opening handshake is -// invalid. -var ErrBadHandshake = errors.New("websocket: bad handshake") - -var errInvalidCompression = errors.New("websocket: invalid compression negotiation") - -// NewClient creates a new client connection using the given net connection. -// The URL u specifies the host and request URI. Use requestHeader to specify -// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies -// (Cookie). Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). 
-// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etc. -// -// Deprecated: Use Dialer instead. -func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { - d := Dialer{ - ReadBufferSize: readBufSize, - WriteBufferSize: writeBufSize, - NetDial: func(net, addr string) (net.Conn, error) { - return netConn, nil - }, - } - return d.Dial(u.String(), requestHeader) -} - -// A Dialer contains options for connecting to WebSocket server. -type Dialer struct { - // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, net.Dial is used. - NetDial func(network, addr string) (net.Conn, error) - - // NetDialContext specifies the dial function for creating TCP connections. If - // NetDialContext is nil, net.DialContext is used. - NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) - - // Proxy specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxy func(*http.Request) (*url.URL, error) - - // TLSClientConfig specifies the TLS configuration to use with tls.Client. - // If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer - // size is zero, then a useful default size is used. The I/O buffer sizes - // do not limit the size of the messages that can be sent or received. - ReadBufferSize, WriteBufferSize int - - // WriteBufferPool is a pool of buffers for write operations. 
If the value - // is not set, then write buffers are allocated to the connection for the - // lifetime of the connection. - // - // A pool is most useful when the application has a modest volume of writes - // across a large number of connections. - // - // Applications should use a single pool for each unique value of - // WriteBufferSize. - WriteBufferPool BufferPool - - // Subprotocols specifies the client's requested subprotocols. - Subprotocols []string - - // EnableCompression specifies if the client should attempt to negotiate - // per message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool - - // Jar specifies the cookie jar. - // If Jar is nil, cookies are not sent in requests and ignored - // in responses. - Jar http.CookieJar -} - -// Dial creates a new client connection by calling DialContext with a background context. -func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - return d.DialContext(context.Background(), urlStr, requestHeader) -} - -var errMalformedURL = errors.New("malformed ws or wss URL") - -func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { - hostPort = u.Host - hostNoPort = u.Host - if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { - hostNoPort = hostNoPort[:i] - } else { - switch u.Scheme { - case "wss": - hostPort += ":443" - case "https": - hostPort += ":443" - default: - hostPort += ":80" - } - } - return hostPort, hostNoPort -} - -// DefaultDialer is a dialer with all fields set to the default values. -var DefaultDialer = &Dialer{ - Proxy: http.ProxyFromEnvironment, - HandshakeTimeout: 45 * time.Second, -} - -// nilDialer is dialer to use when receiver is nil. -var nilDialer = *DefaultDialer - -// DialContext creates a new client connection. 
Use requestHeader to specify the -// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). -// Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// The context will be used in the request and in the Dialer. -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etcetera. The response body may not contain the entire response and does not -// need to be closed by the application. -func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - if d == nil { - d = &nilDialer - } - - challengeKey, err := generateChallengeKey() - if err != nil { - return nil, nil, err - } - - u, err := url.Parse(urlStr) - if err != nil { - return nil, nil, err - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - default: - return nil, nil, errMalformedURL - } - - if u.User != nil { - // User name and password are not allowed in websocket URIs. - return nil, nil, errMalformedURL - } - - req := &http.Request{ - Method: "GET", - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: u.Host, - } - req = req.WithContext(ctx) - - // Set the cookies present in the cookie jar of the dialer - if d.Jar != nil { - for _, cookie := range d.Jar.Cookies(u) { - req.AddCookie(cookie) - } - } - - // Set the request headers using the capitalization for names and values in - // RFC examples. Although the capitalization shouldn't matter, there are - // servers that depend on it. The Header.Set method is not used because the - // method canonicalizes the header names. 
- req.Header["Upgrade"] = []string{"websocket"} - req.Header["Connection"] = []string{"Upgrade"} - req.Header["Sec-WebSocket-Key"] = []string{challengeKey} - req.Header["Sec-WebSocket-Version"] = []string{"13"} - if len(d.Subprotocols) > 0 { - req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} - } - for k, vs := range requestHeader { - switch { - case k == "Host": - if len(vs) > 0 { - req.Host = vs[0] - } - case k == "Upgrade" || - k == "Connection" || - k == "Sec-Websocket-Key" || - k == "Sec-Websocket-Version" || - k == "Sec-Websocket-Extensions" || - (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): - return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) - case k == "Sec-Websocket-Protocol": - req.Header["Sec-WebSocket-Protocol"] = vs - default: - req.Header[k] = vs - } - } - - if d.EnableCompression { - req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} - } - - if d.HandshakeTimeout != 0 { - var cancel func() - ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) - defer cancel() - } - - // Get network dial function. - var netDial func(network, add string) (net.Conn, error) - - if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) - } - } else if d.NetDial != nil { - netDial = d.NetDial - } else { - netDialer := &net.Dialer{} - netDial = func(network, addr string) (net.Conn, error) { - return netDialer.DialContext(ctx, network, addr) - } - } - - // If needed, wrap the dial function to set the connection deadline. 
- if deadline, ok := ctx.Deadline(); ok { - forwardDial := netDial - netDial = func(network, addr string) (net.Conn, error) { - c, err := forwardDial(network, addr) - if err != nil { - return nil, err - } - err = c.SetDeadline(deadline) - if err != nil { - c.Close() - return nil, err - } - return c, nil - } - } - - // If needed, wrap the dial function to connect through a proxy. - if d.Proxy != nil { - proxyURL, err := d.Proxy(req) - if err != nil { - return nil, nil, err - } - if proxyURL != nil { - dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) - if err != nil { - return nil, nil, err - } - netDial = dialer.Dial - } - } - - hostPort, hostNoPort := hostPortNoPort(u) - trace := httptrace.ContextClientTrace(ctx) - if trace != nil && trace.GetConn != nil { - trace.GetConn(hostPort) - } - - netConn, err := netDial("tcp", hostPort) - if trace != nil && trace.GotConn != nil { - trace.GotConn(httptrace.GotConnInfo{ - Conn: netConn, - }) - } - if err != nil { - return nil, nil, err - } - - defer func() { - if netConn != nil { - netConn.Close() - } - }() - - if u.Scheme == "https" { - cfg := cloneTLSConfig(d.TLSClientConfig) - if cfg.ServerName == "" { - cfg.ServerName = hostNoPort - } - tlsConn := tls.Client(netConn, cfg) - netConn = tlsConn - - var err error - if trace != nil { - err = doHandshakeWithTrace(trace, tlsConn, cfg) - } else { - err = doHandshake(tlsConn, cfg) - } - - if err != nil { - return nil, nil, err - } - } - - conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) - - if err := req.Write(netConn); err != nil { - return nil, nil, err - } - - if trace != nil && trace.GotFirstResponseByte != nil { - if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { - trace.GotFirstResponseByte() - } - } - - resp, err := http.ReadResponse(conn.br, req) - if err != nil { - return nil, nil, err - } - - if d.Jar != nil { - if rc := resp.Cookies(); len(rc) > 0 { - d.Jar.SetCookies(u, rc) - } - } - - if 
resp.StatusCode != 101 || - !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || - !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { - // Before closing the network connection on return from this - // function, slurp up some of the response to aid application - // debugging. - buf := make([]byte, 1024) - n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) - return nil, resp, ErrBadHandshake - } - - for _, ext := range parseExtensions(resp.Header) { - if ext[""] != "permessage-deflate" { - continue - } - _, snct := ext["server_no_context_takeover"] - _, cnct := ext["client_no_context_takeover"] - if !snct || !cnct { - return nil, resp, errInvalidCompression - } - conn.newCompressionWriter = compressNoContextTakeover - conn.newDecompressionReader = decompressNoContextTakeover - break - } - - resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) - conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - - netConn.SetDeadline(time.Time{}) - netConn = nil // to avoid close in defer. - return conn, resp, nil -} - -func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.Handshake(); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go deleted file mode 100644 index 4f0d9437..00000000 --- a/vendor/github.com/gorilla/websocket/client_clone.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.8 - -package websocket - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return cfg.Clone() -} diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go deleted file mode 100644 index babb007f..00000000 --- a/vendor/github.com/gorilla/websocket/client_clone_legacy.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package websocket - -import "crypto/tls" - -// cloneTLSConfig clones all public fields except the fields -// SessionTicketsDisabled and SessionTicketKey. This avoids copying the -// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a -// config in active use. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go deleted file mode 100644 index 813ffb1e..00000000 --- a/vendor/github.com/gorilla/websocket/compression.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket 
Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "compress/flate" - "errors" - "io" - "strings" - "sync" -) - -const ( - minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 - maxCompressionLevel = flate.BestCompression - defaultCompressionLevel = 1 -) - -var ( - flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool - flateReaderPool = sync.Pool{New: func() interface{} { - return flate.NewReader(nil) - }} -) - -func decompressNoContextTakeover(r io.Reader) io.ReadCloser { - const tail = - // Add four bytes as specified in RFC - "\x00\x00\xff\xff" + - // Add final block to squelch unexpected EOF error from flate reader. - "\x01\x00\x00\xff\xff" - - fr, _ := flateReaderPool.Get().(io.ReadCloser) - fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) - return &flateReadWrapper{fr} -} - -func isValidCompressionLevel(level int) bool { - return minCompressionLevel <= level && level <= maxCompressionLevel -} - -func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { - p := &flateWriterPools[level-minCompressionLevel] - tw := &truncWriter{w: w} - fw, _ := p.Get().(*flate.Writer) - if fw == nil { - fw, _ = flate.NewWriter(tw, level) - } else { - fw.Reset(tw) - } - return &flateWriteWrapper{fw: fw, tw: tw, p: p} -} - -// truncWriter is an io.Writer that writes all but the last four bytes of the -// stream to another io.Writer. -type truncWriter struct { - w io.WriteCloser - n int - p [4]byte -} - -func (w *truncWriter) Write(p []byte) (int, error) { - n := 0 - - // fill buffer first for simplicity. 
- if w.n < len(w.p) { - n = copy(w.p[w.n:], p) - p = p[n:] - w.n += n - if len(p) == 0 { - return n, nil - } - } - - m := len(p) - if m > len(w.p) { - m = len(w.p) - } - - if nn, err := w.w.Write(w.p[:m]); err != nil { - return n + nn, err - } - - copy(w.p[:], w.p[m:]) - copy(w.p[len(w.p)-m:], p[len(p)-m:]) - nn, err := w.w.Write(p[:len(p)-m]) - return n + nn, err -} - -type flateWriteWrapper struct { - fw *flate.Writer - tw *truncWriter - p *sync.Pool -} - -func (w *flateWriteWrapper) Write(p []byte) (int, error) { - if w.fw == nil { - return 0, errWriteClosed - } - return w.fw.Write(p) -} - -func (w *flateWriteWrapper) Close() error { - if w.fw == nil { - return errWriteClosed - } - err1 := w.fw.Flush() - w.p.Put(w.fw) - w.fw = nil - if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { - return errors.New("websocket: internal error, unexpected bytes at end of flate stream") - } - err2 := w.tw.w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -type flateReadWrapper struct { - fr io.ReadCloser -} - -func (r *flateReadWrapper) Read(p []byte) (int, error) { - if r.fr == nil { - return 0, io.ErrClosedPipe - } - n, err := r.fr.Read(p) - if err == io.EOF { - // Preemptively place the reader back in the pool. This helps with - // scenarios where the application does not call NextReader() soon after - // this final read. - r.Close() - } - return n, err -} - -func (r *flateReadWrapper) Close() error { - if r.fr == nil { - return io.ErrClosedPipe - } - err := r.fr.Close() - flateReaderPool.Put(r.fr) - r.fr = nil - return err -} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go deleted file mode 100644 index 9971ea36..00000000 --- a/vendor/github.com/gorilla/websocket/conn.go +++ /dev/null @@ -1,1163 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "bufio" - "encoding/binary" - "errors" - "io" - "io/ioutil" - "math/rand" - "net" - "strconv" - "sync" - "time" - "unicode/utf8" -) - -const ( - // Frame header byte 0 bits from Section 5.2 of RFC 6455 - finalBit = 1 << 7 - rsv1Bit = 1 << 6 - rsv2Bit = 1 << 5 - rsv3Bit = 1 << 4 - - // Frame header byte 1 bits from Section 5.2 of RFC 6455 - maskBit = 1 << 7 - - maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask - maxControlFramePayloadSize = 125 - - writeWait = time.Second - - defaultReadBufferSize = 4096 - defaultWriteBufferSize = 4096 - - continuationFrame = 0 - noFrame = -1 -) - -// Close codes defined in RFC 6455, section 11.7. -const ( - CloseNormalClosure = 1000 - CloseGoingAway = 1001 - CloseProtocolError = 1002 - CloseUnsupportedData = 1003 - CloseNoStatusReceived = 1005 - CloseAbnormalClosure = 1006 - CloseInvalidFramePayloadData = 1007 - ClosePolicyViolation = 1008 - CloseMessageTooBig = 1009 - CloseMandatoryExtension = 1010 - CloseInternalServerErr = 1011 - CloseServiceRestart = 1012 - CloseTryAgainLater = 1013 - CloseTLSHandshake = 1015 -) - -// The message types are defined in RFC 6455, section 11.8. -const ( - // TextMessage denotes a text data message. The text message payload is - // interpreted as UTF-8 encoded text data. - TextMessage = 1 - - // BinaryMessage denotes a binary data message. - BinaryMessage = 2 - - // CloseMessage denotes a close control message. The optional message - // payload contains a numeric code and text. Use the FormatCloseMessage - // function to format a close message payload. - CloseMessage = 8 - - // PingMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PingMessage = 9 - - // PongMessage denotes a pong control message. The optional message payload - // is UTF-8 encoded text. - PongMessage = 10 -) - -// ErrCloseSent is returned when the application writes a message to the -// connection after sending a close message. 
-var ErrCloseSent = errors.New("websocket: close sent") - -// ErrReadLimit is returned when reading a message that is larger than the -// read limit set for the connection. -var ErrReadLimit = errors.New("websocket: read limit exceeded") - -// netError satisfies the net Error interface. -type netError struct { - msg string - temporary bool - timeout bool -} - -func (e *netError) Error() string { return e.msg } -func (e *netError) Temporary() bool { return e.temporary } -func (e *netError) Timeout() bool { return e.timeout } - -// CloseError represents a close message. -type CloseError struct { - // Code is defined in RFC 6455, section 11.7. - Code int - - // Text is the optional text payload. - Text string -} - -func (e *CloseError) Error() string { - s := []byte("websocket: close ") - s = strconv.AppendInt(s, int64(e.Code), 10) - switch e.Code { - case CloseNormalClosure: - s = append(s, " (normal)"...) - case CloseGoingAway: - s = append(s, " (going away)"...) - case CloseProtocolError: - s = append(s, " (protocol error)"...) - case CloseUnsupportedData: - s = append(s, " (unsupported data)"...) - case CloseNoStatusReceived: - s = append(s, " (no status)"...) - case CloseAbnormalClosure: - s = append(s, " (abnormal closure)"...) - case CloseInvalidFramePayloadData: - s = append(s, " (invalid payload data)"...) - case ClosePolicyViolation: - s = append(s, " (policy violation)"...) - case CloseMessageTooBig: - s = append(s, " (message too big)"...) - case CloseMandatoryExtension: - s = append(s, " (mandatory extension missing)"...) - case CloseInternalServerErr: - s = append(s, " (internal server error)"...) - case CloseTLSHandshake: - s = append(s, " (TLS handshake error)"...) - } - if e.Text != "" { - s = append(s, ": "...) - s = append(s, e.Text...) - } - return string(s) -} - -// IsCloseError returns boolean indicating whether the error is a *CloseError -// with one of the specified codes. 
-func IsCloseError(err error, codes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range codes { - if e.Code == code { - return true - } - } - } - return false -} - -// IsUnexpectedCloseError returns boolean indicating whether the error is a -// *CloseError with a code not in the list of expected codes. -func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range expectedCodes { - if e.Code == code { - return false - } - } - return true - } - return false -} - -var ( - errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} - errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} - errBadWriteOpCode = errors.New("websocket: bad write message type") - errWriteClosed = errors.New("websocket: write closed") - errInvalidControlFrame = errors.New("websocket: invalid control frame") -) - -func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} -} - -func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { - err = &netError{msg: e.Error(), timeout: e.Timeout()} - } - return err -} - -func isControl(frameType int) bool { - return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage -} - -func isData(frameType int) bool { - return frameType == TextMessage || frameType == BinaryMessage -} - -var validReceivedCloseCodes = map[int]bool{ - // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number - - CloseNormalClosure: true, - CloseGoingAway: true, - CloseProtocolError: true, - CloseUnsupportedData: true, - CloseNoStatusReceived: false, - CloseAbnormalClosure: false, - CloseInvalidFramePayloadData: true, - ClosePolicyViolation: true, - CloseMessageTooBig: true, - CloseMandatoryExtension: true, - CloseInternalServerErr: true, - CloseServiceRestart: true, - 
CloseTryAgainLater: true, - CloseTLSHandshake: false, -} - -func isValidReceivedCloseCode(code int) bool { - return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) -} - -// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this -// interface. The type of the value stored in a pool is not specified. -type BufferPool interface { - // Get gets a value from the pool or returns nil if the pool is empty. - Get() interface{} - // Put adds a value to the pool. - Put(interface{}) -} - -// writePoolData is the type added to the write buffer pool. This wrapper is -// used to prevent applications from peeking at and depending on the values -// added to the pool. -type writePoolData struct{ buf []byte } - -// The Conn type represents a WebSocket connection. -type Conn struct { - conn net.Conn - isServer bool - subprotocol string - - // Write fields - mu chan bool // used as mutex to protect write to conn - writeBuf []byte // frame is constructed in this buffer. - writePool BufferPool - writeBufSize int - writeDeadline time.Time - writer io.WriteCloser // the current writer returned to the application - isWriting bool // for best-effort concurrent write detection - - writeErrMu sync.Mutex - writeErr error - - enableWriteCompression bool - compressionLevel int - newCompressionWriter func(io.WriteCloser, int) io.WriteCloser - - // Read fields - reader io.ReadCloser // the current reader returned to the application - readErr error - br *bufio.Reader - readRemaining int64 // bytes remaining in current frame. - readFinal bool // true the current message has more frames. - readLength int64 // Message size. - readLimit int64 // Maximum message size. 
- readMaskPos int - readMaskKey [4]byte - handlePong func(string) error - handlePing func(string) error - handleClose func(int, string) error - readErrCount int - messageReader *messageReader // the current low-level reader - - readDecompress bool // whether last read frame had RSV1 set - newDecompressionReader func(io.Reader) io.ReadCloser -} - -func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { - - if br == nil { - if readBufferSize == 0 { - readBufferSize = defaultReadBufferSize - } else if readBufferSize < maxControlFramePayloadSize { - // must be large enough for control frame - readBufferSize = maxControlFramePayloadSize - } - br = bufio.NewReaderSize(conn, readBufferSize) - } - - if writeBufferSize <= 0 { - writeBufferSize = defaultWriteBufferSize - } - writeBufferSize += maxFrameHeaderSize - - if writeBuf == nil && writeBufferPool == nil { - writeBuf = make([]byte, writeBufferSize) - } - - mu := make(chan bool, 1) - mu <- true - c := &Conn{ - isServer: isServer, - br: br, - conn: conn, - mu: mu, - readFinal: true, - writeBuf: writeBuf, - writePool: writeBufferPool, - writeBufSize: writeBufferSize, - enableWriteCompression: true, - compressionLevel: defaultCompressionLevel, - } - c.SetCloseHandler(nil) - c.SetPingHandler(nil) - c.SetPongHandler(nil) - return c -} - -// Subprotocol returns the negotiated protocol for the connection. -func (c *Conn) Subprotocol() string { - return c.subprotocol -} - -// Close closes the underlying network connection without sending or waiting -// for a close message. -func (c *Conn) Close() error { - return c.conn.Close() -} - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. 
-func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// Write methods - -func (c *Conn) writeFatal(err error) error { - err = hideTempErr(err) - c.writeErrMu.Lock() - if c.writeErr == nil { - c.writeErr = err - } - c.writeErrMu.Unlock() - return err -} - -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - c.br.Discard(len(p)) - return p, err -} - -func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { - <-c.mu - defer func() { c.mu <- true }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - c.conn.SetWriteDeadline(deadline) - if len(buf1) == 0 { - _, err = c.conn.Write(buf0) - } else { - err = c.writeBufs(buf0, buf1) - } - if err != nil { - return c.writeFatal(err) - } - if frameType == CloseMessage { - c.writeFatal(ErrCloseSent) - } - return nil -} - -// WriteControl writes a control message with the given deadline. The allowed -// message types are CloseMessage, PingMessage and PongMessage. -func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { - if !isControl(messageType) { - return errBadWriteOpCode - } - if len(data) > maxControlFramePayloadSize { - return errInvalidControlFrame - } - - b0 := byte(messageType) | finalBit - b1 := byte(len(data)) - if !c.isServer { - b1 |= maskBit - } - - buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) - buf = append(buf, b0, b1) - - if c.isServer { - buf = append(buf, data...) - } else { - key := newMaskKey() - buf = append(buf, key[:]...) - buf = append(buf, data...) 
- maskBytes(key, 0, buf[6:]) - } - - d := time.Hour * 1000 - if !deadline.IsZero() { - d = deadline.Sub(time.Now()) - if d < 0 { - return errWriteTimeout - } - } - - timer := time.NewTimer(d) - select { - case <-c.mu: - timer.Stop() - case <-timer.C: - return errWriteTimeout - } - defer func() { c.mu <- true }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - c.conn.SetWriteDeadline(deadline) - _, err = c.conn.Write(buf) - if err != nil { - return c.writeFatal(err) - } - if messageType == CloseMessage { - c.writeFatal(ErrCloseSent) - } - return err -} - -// beginMessage prepares a connection and message writer for a new message. -func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { - // Close previous writer if not already closed by the application. It's - // probably better to return an error in this situation, but we cannot - // change this without breaking existing applications. - if c.writer != nil { - c.writer.Close() - c.writer = nil - } - - if !isControl(messageType) && !isData(messageType) { - return errBadWriteOpCode - } - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - mw.c = c - mw.frameType = messageType - mw.pos = maxFrameHeaderSize - - if c.writeBuf == nil { - wpd, ok := c.writePool.Get().(writePoolData) - if ok { - c.writeBuf = wpd.buf - } else { - c.writeBuf = make([]byte, c.writeBufSize) - } - } - return nil -} - -// NextWriter returns a writer for the next message to send. The writer's Close -// method flushes the complete message to the network. -// -// There can be at most one open writer on a connection. NextWriter closes the -// previous writer if the application has not already done so. -// -// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and -// PongMessage) are supported. 
-func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - var mw messageWriter - if err := c.beginMessage(&mw, messageType); err != nil { - return nil, err - } - c.writer = &mw - if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { - w := c.newCompressionWriter(c.writer, c.compressionLevel) - mw.compress = true - c.writer = w - } - return c.writer, nil -} - -type messageWriter struct { - c *Conn - compress bool // whether next call to flushFrame should set RSV1 - pos int // end of data in writeBuf. - frameType int // type of the current frame. - err error -} - -func (w *messageWriter) endMessage(err error) error { - if w.err != nil { - return err - } - c := w.c - w.err = err - c.writer = nil - if c.writePool != nil { - c.writePool.Put(writePoolData{buf: c.writeBuf}) - c.writeBuf = nil - } - return err -} - -// flushFrame writes buffered data and extra as a frame to the network. The -// final argument indicates that this is the last frame in the message. -func (w *messageWriter) flushFrame(final bool, extra []byte) error { - c := w.c - length := w.pos - maxFrameHeaderSize + len(extra) - - // Check for invalid control frames. - if isControl(w.frameType) && - (!final || length > maxControlFramePayloadSize) { - return w.endMessage(errInvalidControlFrame) - } - - b0 := byte(w.frameType) - if final { - b0 |= finalBit - } - if w.compress { - b0 |= rsv1Bit - } - w.compress = false - - b1 := byte(0) - if !c.isServer { - b1 |= maskBit - } - - // Assume that the frame starts at beginning of c.writeBuf. - framePos := 0 - if c.isServer { - // Adjust up if mask not included in the header. 
- framePos = 4 - } - - switch { - case length >= 65536: - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 127 - binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) - case length > 125: - framePos += 6 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 126 - binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) - default: - framePos += 8 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | byte(length) - } - - if !c.isServer { - key := newMaskKey() - copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) - if len(extra) > 0 { - return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) - } - } - - // Write the buffers to the connection with best-effort detection of - // concurrent writes. See the concurrency section in the package - // documentation for more info. - - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - - err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) - - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - - if err != nil { - return w.endMessage(err) - } - - if final { - w.endMessage(errWriteClosed) - return nil - } - - // Setup for next frame. - w.pos = maxFrameHeaderSize - w.frameType = continuationFrame - return nil -} - -func (w *messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.pos - if n <= 0 { - if err := w.flushFrame(false, nil); err != nil { - return 0, err - } - n = len(w.c.writeBuf) - w.pos - } - if n > max { - n = max - } - return n, nil -} - -func (w *messageWriter) Write(p []byte) (int, error) { - if w.err != nil { - return 0, w.err - } - - if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { - // Don't buffer large messages. 
- err := w.flushFrame(false, p) - if err != nil { - return 0, err - } - return len(p), nil - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) WriteString(p string) (int, error) { - if w.err != nil { - return 0, w.err - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if w.err != nil { - return 0, w.err - } - for { - if w.pos == len(w.c.writeBuf) { - err = w.flushFrame(false, nil) - if err != nil { - break - } - } - var n int - n, err = r.Read(w.c.writeBuf[w.pos:]) - w.pos += n - nn += int64(n) - if err != nil { - if err == io.EOF { - err = nil - } - break - } - } - return nn, err -} - -func (w *messageWriter) Close() error { - if w.err != nil { - return w.err - } - return w.flushFrame(true, nil) -} - -// WritePreparedMessage writes prepared message into connection. -func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { - frameType, frameData, err := pm.frame(prepareKey{ - isServer: c.isServer, - compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), - compressionLevel: c.compressionLevel, - }) - if err != nil { - return err - } - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - err = c.write(frameType, c.writeDeadline, frameData, nil) - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - return err -} - -// WriteMessage is a helper method for getting a writer using NextWriter, -// writing the message and closing the writer. 
-func (c *Conn) WriteMessage(messageType int, data []byte) error { - - if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { - // Fast path with no allocations and single frame. - - var mw messageWriter - if err := c.beginMessage(&mw, messageType); err != nil { - return err - } - n := copy(c.writeBuf[mw.pos:], data) - mw.pos += n - data = data[n:] - return mw.flushFrame(true, data) - } - - w, err := c.NextWriter(messageType) - if err != nil { - return err - } - if _, err = w.Write(data); err != nil { - return err - } - return w.Close() -} - -// SetWriteDeadline sets the write deadline on the underlying network -// connection. After a write has timed out, the websocket state is corrupt and -// all future writes will return an error. A zero value for t means writes will -// not time out. -func (c *Conn) SetWriteDeadline(t time.Time) error { - c.writeDeadline = t - return nil -} - -// Read methods - -func (c *Conn) advanceFrame() (int, error) { - // 1. Skip remainder of previous frame. - - if c.readRemaining > 0 { - if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { - return noFrame, err - } - } - - // 2. Read and parse first two bytes of frame header. 
- - p, err := c.read(2) - if err != nil { - return noFrame, err - } - - final := p[0]&finalBit != 0 - frameType := int(p[0] & 0xf) - mask := p[1]&maskBit != 0 - c.readRemaining = int64(p[1] & 0x7f) - - c.readDecompress = false - if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { - c.readDecompress = true - p[0] &^= rsv1Bit - } - - if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { - return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) - } - - switch frameType { - case CloseMessage, PingMessage, PongMessage: - if c.readRemaining > maxControlFramePayloadSize { - return noFrame, c.handleProtocolError("control frame length > 125") - } - if !final { - return noFrame, c.handleProtocolError("control frame not final") - } - case TextMessage, BinaryMessage: - if !c.readFinal { - return noFrame, c.handleProtocolError("message start before final message frame") - } - c.readFinal = final - case continuationFrame: - if c.readFinal { - return noFrame, c.handleProtocolError("continuation after final message frame") - } - c.readFinal = final - default: - return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) - } - - // 3. Read and parse frame length. - - switch c.readRemaining { - case 126: - p, err := c.read(2) - if err != nil { - return noFrame, err - } - c.readRemaining = int64(binary.BigEndian.Uint16(p)) - case 127: - p, err := c.read(8) - if err != nil { - return noFrame, err - } - c.readRemaining = int64(binary.BigEndian.Uint64(p)) - } - - // 4. Handle frame masking. - - if mask != c.isServer { - return noFrame, c.handleProtocolError("incorrect mask flag") - } - - if mask { - c.readMaskPos = 0 - p, err := c.read(len(c.readMaskKey)) - if err != nil { - return noFrame, err - } - copy(c.readMaskKey[:], p) - } - - // 5. For text and binary messages, enforce read limit and return. 
- - if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { - - c.readLength += c.readRemaining - if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) - return noFrame, ErrReadLimit - } - - return frameType, nil - } - - // 6. Read control frame payload. - - var payload []byte - if c.readRemaining > 0 { - payload, err = c.read(int(c.readRemaining)) - c.readRemaining = 0 - if err != nil { - return noFrame, err - } - if c.isServer { - maskBytes(c.readMaskKey, 0, payload) - } - } - - // 7. Process control frame payload. - - switch frameType { - case PongMessage: - if err := c.handlePong(string(payload)); err != nil { - return noFrame, err - } - case PingMessage: - if err := c.handlePing(string(payload)); err != nil { - return noFrame, err - } - case CloseMessage: - closeCode := CloseNoStatusReceived - closeText := "" - if len(payload) >= 2 { - closeCode = int(binary.BigEndian.Uint16(payload)) - if !isValidReceivedCloseCode(closeCode) { - return noFrame, c.handleProtocolError("invalid close code") - } - closeText = string(payload[2:]) - if !utf8.ValidString(closeText) { - return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") - } - } - if err := c.handleClose(closeCode, closeText); err != nil { - return noFrame, err - } - return noFrame, &CloseError{Code: closeCode, Text: closeText} - } - - return frameType, nil -} - -func (c *Conn) handleProtocolError(message string) error { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) - return errors.New("websocket: " + message) -} - -// NextReader returns the next data message received from the peer. The -// returned messageType is either TextMessage or BinaryMessage. -// -// There can be at most one open reader on a connection. 
NextReader discards -// the previous message if the application has not already consumed it. -// -// Applications must break out of the application's read loop when this method -// returns a non-nil error value. Errors returned from this method are -// permanent. Once this method returns a non-nil error, all subsequent calls to -// this method return the same error. -func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { - // Close previous reader, only relevant for decompression. - if c.reader != nil { - c.reader.Close() - c.reader = nil - } - - c.messageReader = nil - c.readLength = 0 - - for c.readErr == nil { - frameType, err := c.advanceFrame() - if err != nil { - c.readErr = hideTempErr(err) - break - } - if frameType == TextMessage || frameType == BinaryMessage { - c.messageReader = &messageReader{c} - c.reader = c.messageReader - if c.readDecompress { - c.reader = c.newDecompressionReader(c.reader) - } - return frameType, c.reader, nil - } - } - - // Applications that do handle the error returned from this method spin in - // tight loop on connection failure. To help application developers detect - // this error, panic on repeated reads to the failed connection. 
- c.readErrCount++ - if c.readErrCount >= 1000 { - panic("repeated read on failed websocket connection") - } - - return noFrame, nil, c.readErr -} - -type messageReader struct{ c *Conn } - -func (r *messageReader) Read(b []byte) (int, error) { - c := r.c - if c.messageReader != r { - return 0, io.EOF - } - - for c.readErr == nil { - - if c.readRemaining > 0 { - if int64(len(b)) > c.readRemaining { - b = b[:c.readRemaining] - } - n, err := c.br.Read(b) - c.readErr = hideTempErr(err) - if c.isServer { - c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) - } - c.readRemaining -= int64(n) - if c.readRemaining > 0 && c.readErr == io.EOF { - c.readErr = errUnexpectedEOF - } - return n, c.readErr - } - - if c.readFinal { - c.messageReader = nil - return 0, io.EOF - } - - frameType, err := c.advanceFrame() - switch { - case err != nil: - c.readErr = hideTempErr(err) - case frameType == TextMessage || frameType == BinaryMessage: - c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") - } - } - - err := c.readErr - if err == io.EOF && c.messageReader == r { - err = errUnexpectedEOF - } - return 0, err -} - -func (r *messageReader) Close() error { - return nil -} - -// ReadMessage is a helper method for getting a reader using NextReader and -// reading from that reader to a buffer. -func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { - var r io.Reader - messageType, r, err = c.NextReader() - if err != nil { - return messageType, nil, err - } - p, err = ioutil.ReadAll(r) - return messageType, p, err -} - -// SetReadDeadline sets the read deadline on the underlying network connection. -// After a read has timed out, the websocket connection state is corrupt and -// all future reads will return an error. A zero value for t means reads will -// not time out. 
-func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a -// message exceeds the limit, the connection sends a close message to the peer -// and returns ErrReadLimit to the application. -func (c *Conn) SetReadLimit(limit int64) { - c.readLimit = limit -} - -// CloseHandler returns the current close handler -func (c *Conn) CloseHandler() func(code int, text string) error { - return c.handleClose -} - -// SetCloseHandler sets the handler for close messages received from the peer. -// The code argument to h is the received close code or CloseNoStatusReceived -// if the close message is empty. The default close handler sends a close -// message back to the peer. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// close messages as described in the section on Control Messages above. -// -// The connection read methods return a CloseError when a close message is -// received. Most applications should handle close messages as part of their -// normal error handling. Applications should only set a close handler when the -// application must perform some action before sending a close message back to -// the peer. -func (c *Conn) SetCloseHandler(h func(code int, text string) error) { - if h == nil { - h = func(code int, text string) error { - message := FormatCloseMessage(code, "") - c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) - return nil - } - } - c.handleClose = h -} - -// PingHandler returns the current ping handler -func (c *Conn) PingHandler() func(appData string) error { - return c.handlePing -} - -// SetPingHandler sets the handler for ping messages received from the peer. -// The appData argument to h is the PING message application data. The default -// ping handler sends a pong to the peer. 
-// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// ping messages as described in the section on Control Messages above. -func (c *Conn) SetPingHandler(h func(appData string) error) { - if h == nil { - h = func(message string) error { - err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - if err == ErrCloseSent { - return nil - } else if e, ok := err.(net.Error); ok && e.Temporary() { - return nil - } - return err - } - } - c.handlePing = h -} - -// PongHandler returns the current pong handler -func (c *Conn) PongHandler() func(appData string) error { - return c.handlePong -} - -// SetPongHandler sets the handler for pong messages received from the peer. -// The appData argument to h is the PONG message application data. The default -// pong handler does nothing. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// pong messages as described in the section on Control Messages above. -func (c *Conn) SetPongHandler(h func(appData string) error) { - if h == nil { - h = func(string) error { return nil } - } - c.handlePong = h -} - -// UnderlyingConn returns the internal net.Conn. This can be used to further -// modifications to connection specific flags. -func (c *Conn) UnderlyingConn() net.Conn { - return c.conn -} - -// EnableWriteCompression enables and disables write compression of -// subsequent text and binary messages. This function is a noop if -// compression was not negotiated with the peer. -func (c *Conn) EnableWriteCompression(enable bool) { - c.enableWriteCompression = enable -} - -// SetCompressionLevel sets the flate compression level for subsequent text and -// binary messages. This function is a noop if compression was not negotiated -// with the peer. 
See the compress/flate package for a description of -// compression levels. -func (c *Conn) SetCompressionLevel(level int) error { - if !isValidCompressionLevel(level) { - return errors.New("websocket: invalid compression level") - } - c.compressionLevel = level - return nil -} - -// FormatCloseMessage formats closeCode and text as a WebSocket close message. -// An empty message is returned for code CloseNoStatusReceived. -func FormatCloseMessage(closeCode int, text string) []byte { - if closeCode == CloseNoStatusReceived { - // Return empty message because it's illegal to send - // CloseNoStatusReceived. Return non-nil value in case application - // checks for nil. - return []byte{} - } - buf := make([]byte, 2+len(text)) - binary.BigEndian.PutUint16(buf, uint16(closeCode)) - copy(buf[2:], text) - return buf -} diff --git a/vendor/github.com/gorilla/websocket/conn_write.go b/vendor/github.com/gorilla/websocket/conn_write.go deleted file mode 100644 index a509a21f..00000000 --- a/vendor/github.com/gorilla/websocket/conn_write.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package websocket - -import "net" - -func (c *Conn) writeBufs(bufs ...[]byte) error { - b := net.Buffers(bufs) - _, err := b.WriteTo(c.conn) - return err -} diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go deleted file mode 100644 index 37edaff5..00000000 --- a/vendor/github.com/gorilla/websocket/conn_write_legacy.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.8 - -package websocket - -func (c *Conn) writeBufs(bufs ...[]byte) error { - for _, buf := range bufs { - if len(buf) > 0 { - if _, err := c.conn.Write(buf); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go deleted file mode 100644 index c6f4df89..00000000 --- a/vendor/github.com/gorilla/websocket/doc.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package websocket implements the WebSocket protocol defined in RFC 6455. -// -// Overview -// -// The Conn type represents a WebSocket connection. A server application calls -// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: -// -// var upgrader = websocket.Upgrader{ -// ReadBufferSize: 1024, -// WriteBufferSize: 1024, -// } -// -// func handler(w http.ResponseWriter, r *http.Request) { -// conn, err := upgrader.Upgrade(w, r, nil) -// if err != nil { -// log.Println(err) -// return -// } -// ... Use conn to send and receive messages. -// } -// -// Call the connection's WriteMessage and ReadMessage methods to send and -// receive messages as a slice of bytes. This snippet of code shows how to echo -// messages using these methods: -// -// for { -// messageType, p, err := conn.ReadMessage() -// if err != nil { -// log.Println(err) -// return -// } -// if err := conn.WriteMessage(messageType, p); err != nil { -// log.Println(err) -// return -// } -// } -// -// In above snippet of code, p is a []byte and messageType is an int with value -// websocket.BinaryMessage or websocket.TextMessage. -// -// An application can also send and receive messages using the io.WriteCloser -// and io.Reader interfaces. 
To send a message, call the connection NextWriter -// method to get an io.WriteCloser, write the message to the writer and close -// the writer when done. To receive a message, call the connection NextReader -// method to get an io.Reader and read until io.EOF is returned. This snippet -// shows how to echo messages using the NextWriter and NextReader methods: -// -// for { -// messageType, r, err := conn.NextReader() -// if err != nil { -// return -// } -// w, err := conn.NextWriter(messageType) -// if err != nil { -// return err -// } -// if _, err := io.Copy(w, r); err != nil { -// return err -// } -// if err := w.Close(); err != nil { -// return err -// } -// } -// -// Data Messages -// -// The WebSocket protocol distinguishes between text and binary data messages. -// Text messages are interpreted as UTF-8 encoded text. The interpretation of -// binary messages is left to the application. -// -// This package uses the TextMessage and BinaryMessage integer constants to -// identify the two data message types. The ReadMessage and NextReader methods -// return the type of the received message. The messageType argument to the -// WriteMessage and NextWriter methods specifies the type of a sent message. -// -// It is the application's responsibility to ensure that text messages are -// valid UTF-8 encoded text. -// -// Control Messages -// -// The WebSocket protocol defines three types of control messages: close, ping -// and pong. Call the connection WriteControl, WriteMessage or NextWriter -// methods to send a control message to the peer. -// -// Connections handle received close messages by calling the handler function -// set with the SetCloseHandler method and by returning a *CloseError from the -// NextReader, ReadMessage or the message Read method. The default close -// handler sends a close message to the peer. -// -// Connections handle received ping messages by calling the handler function -// set with the SetPingHandler method. 
The default ping handler sends a pong -// message to the peer. -// -// Connections handle received pong messages by calling the handler function -// set with the SetPongHandler method. The default pong handler does nothing. -// If an application sends ping messages, then the application should set a -// pong handler to receive the corresponding pong. -// -// The control message handler functions are called from the NextReader, -// ReadMessage and message reader Read methods. The default close and ping -// handlers can block these methods for a short time when the handler writes to -// the connection. -// -// The application must read the connection to process close, ping and pong -// messages sent from the peer. If the application is not otherwise interested -// in messages from the peer, then the application should start a goroutine to -// read and discard messages from the peer. A simple example is: -// -// func readLoop(c *websocket.Conn) { -// for { -// if _, _, err := c.NextReader(); err != nil { -// c.Close() -// break -// } -// } -// } -// -// Concurrency -// -// Connections support one concurrent reader and one concurrent writer. -// -// Applications are responsible for ensuring that no more than one goroutine -// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, -// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and -// that no more than one goroutine calls the read methods (NextReader, -// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) -// concurrently. -// -// The Close and WriteControl methods can be called concurrently with all other -// methods. -// -// Origin Considerations -// -// Web browsers allow Javascript applications to open a WebSocket connection to -// any host. It's up to the server to enforce an origin policy using the Origin -// request header sent by the browser. -// -// The Upgrader calls the function specified in the CheckOrigin field to check -// the origin. 
If the CheckOrigin function returns false, then the Upgrade -// method fails the WebSocket handshake with HTTP status 403. -// -// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail -// the handshake if the Origin request header is present and the Origin host is -// not equal to the Host request header. -// -// The deprecated package-level Upgrade function does not perform origin -// checking. The application is responsible for checking the Origin header -// before calling the Upgrade function. -// -// Buffers -// -// Connections buffer network input and output to reduce the number -// of system calls when reading or writing messages. -// -// Write buffers are also used for constructing WebSocket frames. See RFC 6455, -// Section 5 for a discussion of message framing. A WebSocket frame header is -// written to the network each time a write buffer is flushed to the network. -// Decreasing the size of the write buffer can increase the amount of framing -// overhead on the connection. -// -// The buffer sizes in bytes are specified by the ReadBufferSize and -// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default -// size of 4096 when a buffer size field is set to zero. The Upgrader reuses -// buffers created by the HTTP server when a buffer size field is set to zero. -// The HTTP server buffers have a size of 4096 at the time of this writing. -// -// The buffer sizes do not limit the size of a message that can be read or -// written by a connection. -// -// Buffers are held for the lifetime of the connection by default. If the -// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the -// write buffer only when writing a message. -// -// Applications should tune the buffer sizes to balance memory use and -// performance. Increasing the buffer size uses more memory, but can reduce the -// number of system calls to read or write the network. 
In the case of writing, -// increasing the buffer size can reduce the number of frame headers written to -// the network. -// -// Some guidelines for setting buffer parameters are: -// -// Limit the buffer sizes to the maximum expected message size. Buffers larger -// than the largest message do not provide any benefit. -// -// Depending on the distribution of message sizes, setting the buffer size to -// to a value less than the maximum expected message size can greatly reduce -// memory use with a small impact on performance. Here's an example: If 99% of -// the messages are smaller than 256 bytes and the maximum message size is 512 -// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls -// than a buffer size of 512 bytes. The memory savings is 50%. -// -// A write buffer pool is useful when the application has a modest number -// writes over a large number of connections. when buffers are pooled, a larger -// buffer size has a reduced impact on total memory use and has the benefit of -// reducing system calls and frame overhead. -// -// Compression EXPERIMENTAL -// -// Per message compression extensions (RFC 7692) are experimentally supported -// by this package in a limited capacity. Setting the EnableCompression option -// to true in Dialer or Upgrader will attempt to negotiate per message deflate -// support. -// -// var upgrader = websocket.Upgrader{ -// EnableCompression: true, -// } -// -// If compression was successfully negotiated with the connection's peer, any -// message received in compressed form will be automatically decompressed. -// All Read methods will return uncompressed bytes. -// -// Per message compression of messages written to a connection can be enabled -// or disabled by calling the corresponding Conn method: -// -// conn.EnableWriteCompression(false) -// -// Currently this package does not support compression with "context takeover". 
-// This means that messages must be compressed and decompressed in isolation, -// without retaining sliding window or dictionary state across messages. For -// more details refer to RFC 7692. -// -// Use of compression is experimental and may result in decreased performance. -package websocket diff --git a/vendor/github.com/gorilla/websocket/go.mod b/vendor/github.com/gorilla/websocket/go.mod deleted file mode 100644 index 93a9e924..00000000 --- a/vendor/github.com/gorilla/websocket/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/gorilla/websocket diff --git a/vendor/github.com/gorilla/websocket/go.sum b/vendor/github.com/gorilla/websocket/go.sum deleted file mode 100644 index cf4fbbaa..00000000 --- a/vendor/github.com/gorilla/websocket/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go deleted file mode 100644 index c64f8c82..00000000 --- a/vendor/github.com/gorilla/websocket/join.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "io" - "strings" -) - -// JoinMessages concatenates received messages to create a single io.Reader. -// The string term is appended to each message. The returned reader does not -// support concurrent calls to the Read method. 
-func JoinMessages(c *Conn, term string) io.Reader { - return &joinReader{c: c, term: term} -} - -type joinReader struct { - c *Conn - term string - r io.Reader -} - -func (r *joinReader) Read(p []byte) (int, error) { - if r.r == nil { - var err error - _, r.r, err = r.c.NextReader() - if err != nil { - return 0, err - } - if r.term != "" { - r.r = io.MultiReader(r.r, strings.NewReader(r.term)) - } - } - n, err := r.r.Read(p) - if err == io.EOF { - err = nil - r.r = nil - } - return n, err -} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go deleted file mode 100644 index dc2c1f64..00000000 --- a/vendor/github.com/gorilla/websocket/json.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "encoding/json" - "io" -) - -// WriteJSON writes the JSON encoding of v as a message. -// -// Deprecated: Use c.WriteJSON instead. -func WriteJSON(c *Conn, v interface{}) error { - return c.WriteJSON(v) -} - -// WriteJSON writes the JSON encoding of v as a message. -// -// See the documentation for encoding/json Marshal for details about the -// conversion of Go values to JSON. -func (c *Conn) WriteJSON(v interface{}) error { - w, err := c.NextWriter(TextMessage) - if err != nil { - return err - } - err1 := json.NewEncoder(w).Encode(v) - err2 := w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// Deprecated: Use c.ReadJSON instead. -func ReadJSON(c *Conn, v interface{}) error { - return c.ReadJSON(v) -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. 
-// -// See the documentation for the encoding/json Unmarshal function for details -// about the conversion of JSON to a Go value. -func (c *Conn) ReadJSON(v interface{}) error { - _, r, err := c.NextReader() - if err != nil { - return err - } - err = json.NewDecoder(r).Decode(v) - if err == io.EOF { - // One value is expected in the message. - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go deleted file mode 100644 index 577fce9e..00000000 --- a/vendor/github.com/gorilla/websocket/mask.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// +build !appengine - -package websocket - -import "unsafe" - -const wordSize = int(unsafe.Sizeof(uintptr(0))) - -func maskBytes(key [4]byte, pos int, b []byte) int { - // Mask one byte at a time for small buffers. - if len(b) < 2*wordSize { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 - } - - // Mask one byte at a time to word boundary. - if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { - n = wordSize - n - for i := range b[:n] { - b[i] ^= key[pos&3] - pos++ - } - b = b[n:] - } - - // Create aligned word size key. - var k [wordSize]byte - for i := range k { - k[i] = key[(pos+i)&3] - } - kw := *(*uintptr)(unsafe.Pointer(&k)) - - // Mask one word at a time. - n := (len(b) / wordSize) * wordSize - for i := 0; i < n; i += wordSize { - *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw - } - - // Mask one byte at a time for remaining bytes. 
- b = b[n:] - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - - return pos & 3 -} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go deleted file mode 100644 index 2aac060e..00000000 --- a/vendor/github.com/gorilla/websocket/mask_safe.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// +build appengine - -package websocket - -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 -} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go deleted file mode 100644 index 74ec565d..00000000 --- a/vendor/github.com/gorilla/websocket/prepared.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "net" - "sync" - "time" -) - -// PreparedMessage caches on the wire representations of a message payload. -// Use PreparedMessage to efficiently send a message payload to multiple -// connections. PreparedMessage is especially useful when compression is used -// because the CPU and memory expensive compression operation can be executed -// once for a given set of compression options. -type PreparedMessage struct { - messageType int - data []byte - mu sync.Mutex - frames map[prepareKey]*preparedFrame -} - -// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. -type prepareKey struct { - isServer bool - compress bool - compressionLevel int -} - -// preparedFrame contains data in wire representation. 
-type preparedFrame struct { - once sync.Once - data []byte -} - -// NewPreparedMessage returns an initialized PreparedMessage. You can then send -// it to connection using WritePreparedMessage method. Valid wire -// representation will be calculated lazily only once for a set of current -// connection options. -func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { - pm := &PreparedMessage{ - messageType: messageType, - frames: make(map[prepareKey]*preparedFrame), - data: data, - } - - // Prepare a plain server frame. - _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) - if err != nil { - return nil, err - } - - // To protect against caller modifying the data argument, remember the data - // copied to the plain server frame. - pm.data = frameData[len(frameData)-len(data):] - return pm, nil -} - -func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { - pm.mu.Lock() - frame, ok := pm.frames[key] - if !ok { - frame = &preparedFrame{} - pm.frames[key] = frame - } - pm.mu.Unlock() - - var err error - frame.once.Do(func() { - // Prepare a frame using a 'fake' connection. - // TODO: Refactor code in conn.go to allow more direct construction of - // the frame. 
- mu := make(chan bool, 1) - mu <- true - var nc prepareConn - c := &Conn{ - conn: &nc, - mu: mu, - isServer: key.isServer, - compressionLevel: key.compressionLevel, - enableWriteCompression: true, - writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), - } - if key.compress { - c.newCompressionWriter = compressNoContextTakeover - } - err = c.WriteMessage(pm.messageType, pm.data) - frame.data = nc.buf.Bytes() - }) - return pm.messageType, frame.data, err -} - -type prepareConn struct { - buf bytes.Buffer - net.Conn -} - -func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } -func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go deleted file mode 100644 index e87a8c9f..00000000 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "bufio" - "encoding/base64" - "errors" - "net" - "net/http" - "net/url" - "strings" -) - -type netDialerFunc func(network, addr string) (net.Conn, error) - -func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { - return fn(network, addr) -} - -func init() { - proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { - return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil - }) -} - -type httpProxyDialer struct { - proxyURL *url.URL - forwardDial func(network, addr string) (net.Conn, error) -} - -func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { - hostPort, _ := hostPortNoPort(hpd.proxyURL) - conn, err := hpd.forwardDial(network, hostPort) - if err != nil { - return nil, err - } - - connectHeader := make(http.Header) - if user := hpd.proxyURL.User; user != nil { - proxyUser := user.Username() - if proxyPassword, passwordSet := user.Password(); passwordSet { - credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) - connectHeader.Set("Proxy-Authorization", "Basic "+credential) - } - } - - connectReq := &http.Request{ - Method: "CONNECT", - URL: &url.URL{Opaque: addr}, - Host: addr, - Header: connectHeader, - } - - if err := connectReq.Write(conn); err != nil { - conn.Close() - return nil, err - } - - // Read response. It's OK to use and discard buffered reader here becaue - // the remote server does not speak until spoken to. 
- br := bufio.NewReader(conn) - resp, err := http.ReadResponse(br, connectReq) - if err != nil { - conn.Close() - return nil, err - } - - if resp.StatusCode != 200 { - conn.Close() - f := strings.SplitN(resp.Status, " ", 2) - return nil, errors.New(f[1]) - } - return conn, nil -} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go deleted file mode 100644 index 3d4480a4..00000000 --- a/vendor/github.com/gorilla/websocket/server.go +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "errors" - "io" - "net/http" - "net/url" - "strings" - "time" -) - -// HandshakeError describes an error with the handshake from the peer. -type HandshakeError struct { - message string -} - -func (e HandshakeError) Error() string { return e.message } - -// Upgrader specifies parameters for upgrading an HTTP connection to a -// WebSocket connection. -type Upgrader struct { - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer - // size is zero, then buffers allocated by the HTTP server are used. The - // I/O buffer sizes do not limit the size of the messages that can be sent - // or received. - ReadBufferSize, WriteBufferSize int - - // WriteBufferPool is a pool of buffers for write operations. If the value - // is not set, then write buffers are allocated to the connection for the - // lifetime of the connection. - // - // A pool is most useful when the application has a modest volume of writes - // across a large number of connections. - // - // Applications should use a single pool for each unique value of - // WriteBufferSize. 
- WriteBufferPool BufferPool - - // Subprotocols specifies the server's supported protocols in order of - // preference. If this field is not nil, then the Upgrade method negotiates a - // subprotocol by selecting the first match in this list with a protocol - // requested by the client. If there's no match, then no protocol is - // negotiated (the Sec-Websocket-Protocol header is not included in the - // handshake response). - Subprotocols []string - - // Error specifies the function for generating HTTP error responses. If Error - // is nil, then http.Error is used to generate the HTTP response. - Error func(w http.ResponseWriter, r *http.Request, status int, reason error) - - // CheckOrigin returns true if the request Origin header is acceptable. If - // CheckOrigin is nil, then a safe default is used: return false if the - // Origin request header is present and the origin host is not equal to - // request Host header. - // - // A CheckOrigin function should carefully validate the request origin to - // prevent cross-site request forgery. - CheckOrigin func(r *http.Request) bool - - // EnableCompression specify if the server should attempt to negotiate per - // message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool -} - -func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { - err := HandshakeError{reason} - if u.Error != nil { - u.Error(w, r, status, err) - } else { - w.Header().Set("Sec-Websocket-Version", "13") - http.Error(w, http.StatusText(status), status) - } - return nil, err -} - -// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
-func checkSameOrigin(r *http.Request) bool { - origin := r.Header["Origin"] - if len(origin) == 0 { - return true - } - u, err := url.Parse(origin[0]) - if err != nil { - return false - } - return equalASCIIFold(u.Host, r.Host) -} - -func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { - if u.Subprotocols != nil { - clientProtocols := Subprotocols(r) - for _, serverProtocol := range u.Subprotocols { - for _, clientProtocol := range clientProtocols { - if clientProtocol == serverProtocol { - return clientProtocol - } - } - } - } else if responseHeader != nil { - return responseHeader.Get("Sec-Websocket-Protocol") - } - return "" -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// application negotiated subprotocol (Sec-WebSocket-Protocol). -// -// If the upgrade fails, then Upgrade replies to the client with an HTTP error -// response. 
-func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { - const badHandshake = "websocket: the client is not using the websocket protocol: " - - if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") - } - - if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") - } - - if r.Method != "GET" { - return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") - } - - if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") - } - - if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") - } - - checkOrigin := u.CheckOrigin - if checkOrigin == nil { - checkOrigin = checkSameOrigin - } - if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") - } - - challengeKey := r.Header.Get("Sec-Websocket-Key") - if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank") - } - - subprotocol := u.selectSubprotocol(r, responseHeader) - - // Negotiate PMCE - var compress bool - if u.EnableCompression { - for _, ext := range parseExtensions(r.Header) { - if ext[""] != "permessage-deflate" { - continue - } - compress = true - break - } - } - - h, ok := w.(http.Hijacker) - if !ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not 
implement http.Hijacker") - } - var brw *bufio.ReadWriter - netConn, brw, err := h.Hijack() - if err != nil { - return u.returnError(w, r, http.StatusInternalServerError, err.Error()) - } - - if brw.Reader.Buffered() > 0 { - netConn.Close() - return nil, errors.New("websocket: client sent data before handshake is complete") - } - - var br *bufio.Reader - if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { - // Reuse hijacked buffered reader as connection reader. - br = brw.Reader - } - - buf := bufioWriterBuffer(netConn, brw.Writer) - - var writeBuf []byte - if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { - // Reuse hijacked write buffer as connection buffer. - writeBuf = buf - } - - c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) - c.subprotocol = subprotocol - - if compress { - c.newCompressionWriter = compressNoContextTakeover - c.newDecompressionReader = decompressNoContextTakeover - } - - // Use larger of hijacked buffer and connection write buffer for header. - p := buf - if len(c.writeBuf) > len(p) { - p = c.writeBuf - } - p = p[:0] - - p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) - p = append(p, computeAcceptKey(challengeKey)...) - p = append(p, "\r\n"...) - if c.subprotocol != "" { - p = append(p, "Sec-WebSocket-Protocol: "...) - p = append(p, c.subprotocol...) - p = append(p, "\r\n"...) - } - if compress { - p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) - } - for k, vs := range responseHeader { - if k == "Sec-Websocket-Protocol" { - continue - } - for _, v := range vs { - p = append(p, k...) - p = append(p, ": "...) - for i := 0; i < len(v); i++ { - b := v[i] - if b <= 31 { - // prevent response splitting. - b = ' ' - } - p = append(p, b) - } - p = append(p, "\r\n"...) 
- } - } - p = append(p, "\r\n"...) - - // Clear deadlines set by HTTP server. - netConn.SetDeadline(time.Time{}) - - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) - } - if _, err = netConn.Write(p); err != nil { - netConn.Close() - return nil, err - } - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Time{}) - } - - return c, nil -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// Deprecated: Use websocket.Upgrader instead. -// -// Upgrade does not perform origin checking. The application is responsible for -// checking the Origin header before calling Upgrade. An example implementation -// of the same origin policy check is: -// -// if req.Header.Get("Origin") != "http://"+req.Host { -// http.Error(w, "Origin not allowed", http.StatusForbidden) -// return -// } -// -// If the endpoint supports subprotocols, then the application is responsible -// for negotiating the protocol used on the connection. Use the Subprotocols() -// function to get the subprotocols requested by the client. Use the -// Sec-Websocket-Protocol response header to specify the subprotocol selected -// by the application. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// negotiated subprotocol (Sec-Websocket-Protocol). -// -// The connection buffers IO to the underlying network connection. The -// readBufSize and writeBufSize parameters specify the size of the buffers to -// use. Messages can be larger than the buffers. -// -// If the request is not a valid WebSocket handshake, then Upgrade returns an -// error of type HandshakeError. Applications should handle this error by -// replying to the client with an HTTP error response. 
-func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { - u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} - u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { - // don't return errors to maintain backwards compatibility - } - u.CheckOrigin = func(r *http.Request) bool { - // allow all connections by default - return true - } - return u.Upgrade(w, r, responseHeader) -} - -// Subprotocols returns the subprotocols requested by the client in the -// Sec-Websocket-Protocol header. -func Subprotocols(r *http.Request) []string { - h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) - if h == "" { - return nil - } - protocols := strings.Split(h, ",") - for i := range protocols { - protocols[i] = strings.TrimSpace(protocols[i]) - } - return protocols -} - -// IsWebSocketUpgrade returns true if the client requested upgrade to the -// WebSocket protocol. -func IsWebSocketUpgrade(r *http.Request) bool { - return tokenListContainsValue(r.Header, "Connection", "upgrade") && - tokenListContainsValue(r.Header, "Upgrade", "websocket") -} - -// bufioReaderSize size returns the size of a bufio.Reader. -func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { - // This code assumes that peek on a reset reader returns - // bufio.Reader.buf[:0]. - // TODO: Use bufio.Reader.Size() after Go 1.10 - br.Reset(originalReader) - if p, err := br.Peek(0); err == nil { - return cap(p) - } - return 0 -} - -// writeHook is an io.Writer that records the last slice passed to it vio -// io.Writer.Write. -type writeHook struct { - p []byte -} - -func (wh *writeHook) Write(p []byte) (int, error) { - wh.p = p - return len(p), nil -} - -// bufioWriterBuffer grabs the buffer from a bufio.Writer. 
-func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { - // This code assumes that bufio.Writer.buf[:1] is passed to the - // bufio.Writer's underlying writer. - var wh writeHook - bw.Reset(&wh) - bw.WriteByte(0) - bw.Flush() - - bw.Reset(originalWriter) - - return wh.p[:cap(wh.p)] -} diff --git a/vendor/github.com/gorilla/websocket/trace.go b/vendor/github.com/gorilla/websocket/trace.go deleted file mode 100644 index 834f122a..00000000 --- a/vendor/github.com/gorilla/websocket/trace.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.8 - -package websocket - -import ( - "crypto/tls" - "net/http/httptrace" -) - -func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { - if trace.TLSHandshakeStart != nil { - trace.TLSHandshakeStart() - } - err := doHandshake(tlsConn, cfg) - if trace.TLSHandshakeDone != nil { - trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) - } - return err -} diff --git a/vendor/github.com/gorilla/websocket/trace_17.go b/vendor/github.com/gorilla/websocket/trace_17.go deleted file mode 100644 index 77d05a0b..00000000 --- a/vendor/github.com/gorilla/websocket/trace_17.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !go1.8 - -package websocket - -import ( - "crypto/tls" - "net/http/httptrace" -) - -func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { - return doHandshake(tlsConn, cfg) -} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go deleted file mode 100644 index 7bf2f66c..00000000 --- a/vendor/github.com/gorilla/websocket/util.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "io" - "net/http" - "strings" - "unicode/utf8" -) - -var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") - -func computeAcceptKey(challengeKey string) string { - h := sha1.New() - h.Write([]byte(challengeKey)) - h.Write(keyGUID) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func generateChallengeKey() (string, error) { - p := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, p); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(p), nil -} - -// Token octets per RFC 2616. -var isTokenOctet = [256]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, -} - -// skipSpace returns a slice of the string s with all leading RFC 2616 linear -// whitespace removed. 
-func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if b := s[i]; b != ' ' && b != '\t' { - break - } - } - return s[i:] -} - -// nextToken returns the leading RFC 2616 token of s and the string following -// the token. -func nextToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if !isTokenOctet[s[i]] { - break - } - } - return s[:i], s[i:] -} - -// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 -// and the string following the token or quoted string. -func nextTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return nextToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} - -// equalASCIIFold returns true if s is equal to t with ASCII case folding as -// defined in RFC 4790. -func equalASCIIFold(s, t string) bool { - for s != "" && t != "" { - sr, size := utf8.DecodeRuneInString(s) - s = s[size:] - tr, size := utf8.DecodeRuneInString(t) - t = t[size:] - if sr == tr { - continue - } - if 'A' <= sr && sr <= 'Z' { - sr = sr + 'a' - 'A' - } - if 'A' <= tr && tr <= 'Z' { - tr = tr + 'a' - 'A' - } - if sr != tr { - return false - } - } - return s == t -} - -// tokenListContainsValue returns true if the 1#token header with the given -// name contains a token equal to value with ASCII case folding. 
-func tokenListContainsValue(header http.Header, name string, value string) bool { -headers: - for _, s := range header[name] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - s = skipSpace(s) - if s != "" && s[0] != ',' { - continue headers - } - if equalASCIIFold(t, value) { - return true - } - if s == "" { - continue headers - } - s = s[1:] - } - } - return false -} - -// parseExtensions parses WebSocket extensions from a header. -func parseExtensions(header http.Header) []map[string]string { - // From RFC 6455: - // - // Sec-WebSocket-Extensions = extension-list - // extension-list = 1#extension - // extension = extension-token *( ";" extension-param ) - // extension-token = registered-token - // registered-token = token - // extension-param = token [ "=" (token | quoted-string) ] - // ;When using the quoted-string syntax variant, the value - // ;after quoted-string unescaping MUST conform to the - // ;'token' ABNF. - - var result []map[string]string -headers: - for _, s := range header["Sec-Websocket-Extensions"] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - ext := map[string]string{"": t} - for { - s = skipSpace(s) - if !strings.HasPrefix(s, ";") { - break - } - var k string - k, s = nextToken(skipSpace(s[1:])) - if k == "" { - continue headers - } - s = skipSpace(s) - var v string - if strings.HasPrefix(s, "=") { - v, s = nextTokenOrQuoted(skipSpace(s[1:])) - s = skipSpace(s) - } - if s != "" && s[0] != ',' && s[0] != ';' { - continue headers - } - ext[k] = v - } - if s != "" && s[0] != ',' { - continue headers - } - result = append(result, ext) - if s == "" { - continue headers - } - s = s[1:] - } - } - return result -} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go deleted file mode 100644 index 2e668f6b..00000000 --- 
a/vendor/github.com/gorilla/websocket/x_net_proxy.go +++ /dev/null @@ -1,473 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy - -// Package proxy provides support for a variety of protocols to proxy network -// data. -// - -package websocket - -import ( - "errors" - "io" - "net" - "net/url" - "os" - "strconv" - "strings" - "sync" -) - -type proxy_direct struct{} - -// Direct is a direct proxy: one that makes network connections directly. -var proxy_Direct = proxy_direct{} - -func (proxy_direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. -type proxy_PerHost struct { - def, bypass proxy_Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. -func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { - return &proxy_PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. 
-func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *proxy_PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. 
-func (p *proxy_PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *proxy_PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. -func (p *proxy_PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} - -// A Dialer is a means to establish a connection. -type proxy_Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type proxy_Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy related variables in -// the environment. 
-func proxy_FromEnvironment() proxy_Dialer { - allProxy := proxy_allProxyEnv.Get() - if len(allProxy) == 0 { - return proxy_Direct - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return proxy_Direct - } - proxy, err := proxy_FromURL(proxyURL, proxy_Direct) - if err != nil { - return proxy_Direct - } - - noProxy := proxy_noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := proxy_NewPerHost(proxy, proxy_Direct) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { - if proxy_proxySchemes == nil { - proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) - } - proxy_proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. -func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { - var auth *proxy_Auth - if u.User != nil { - auth = new(proxy_Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5": - return proxy_SOCKS5("tcp", u.Host, auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. 
- if proxy_proxySchemes != nil { - if f, ok := proxy_proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - proxy_allProxyEnv = &proxy_envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - proxy_noProxyEnv = &proxy_envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type proxy_envOnce struct { - names []string - once sync.Once - val string -} - -func (e *proxy_envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *proxy_envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address -// with an optional username and password. See RFC 1928 and RFC 1929. -func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { - s := &proxy_socks5{ - network: network, - addr: addr, - forward: forward, - } - if auth != nil { - s.user = auth.User - s.password = auth.Password - } - - return s, nil -} - -type proxy_socks5 struct { - user, password string - network, addr string - forward proxy_Dialer -} - -const proxy_socks5Version = 5 - -const ( - proxy_socks5AuthNone = 0 - proxy_socks5AuthPassword = 2 -) - -const proxy_socks5Connect = 1 - -const ( - proxy_socks5IP4 = 1 - proxy_socks5Domain = 3 - proxy_socks5IP6 = 4 -) - -var proxy_socks5Errors = []string{ - "", - "general failure", - "connection forbidden", - "network unreachable", - "host unreachable", - "connection refused", - "TTL expired", - "command not supported", - "address type not supported", -} - -// Dial connects to the address addr on the given network via the SOCKS5 proxy. 
-func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) - } - - conn, err := s.forward.Dial(s.network, s.addr) - if err != nil { - return nil, err - } - if err := s.connect(conn, addr); err != nil { - conn.Close() - return nil, err - } - return conn, nil -} - -// connect takes an existing connection to a socks5 proxy server, -// and commands the server to extend that connection to target, -// which must be a canonical address with a host and port. -func (s *proxy_socks5) connect(conn net.Conn, target string) error { - host, portStr, err := net.SplitHostPort(target) - if err != nil { - return err - } - - port, err := strconv.Atoi(portStr) - if err != nil { - return errors.New("proxy: failed to parse port number: " + portStr) - } - if port < 1 || port > 0xffff { - return errors.New("proxy: port number out of range: " + portStr) - } - - // the size here is just an estimate - buf := make([]byte, 0, 6+len(host)) - - buf = append(buf, proxy_socks5Version) - if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { - buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) - } else { - buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) - } - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - if buf[0] != 5 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) - } - if buf[1] == 0xff { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") - } - - // See RFC 1929 - if buf[1] == 
proxy_socks5AuthPassword { - buf = buf[:0] - buf = append(buf, 1 /* password protocol version */) - buf = append(buf, uint8(len(s.user))) - buf = append(buf, s.user...) - buf = append(buf, uint8(len(s.password))) - buf = append(buf, s.password...) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if buf[1] != 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") - } - } - - buf = buf[:0] - buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) - - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - buf = append(buf, proxy_socks5IP4) - ip = ip4 - } else { - buf = append(buf, proxy_socks5IP6) - } - buf = append(buf, ip...) - } else { - if len(host) > 255 { - return errors.New("proxy: destination host name too long: " + host) - } - buf = append(buf, proxy_socks5Domain) - buf = append(buf, byte(len(host))) - buf = append(buf, host...) 
- } - buf = append(buf, byte(port>>8), byte(port)) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - failure := "unknown error" - if int(buf[1]) < len(proxy_socks5Errors) { - failure = proxy_socks5Errors[buf[1]] - } - - if len(failure) > 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) - } - - bytesToDiscard := 0 - switch buf[3] { - case proxy_socks5IP4: - bytesToDiscard = net.IPv4len - case proxy_socks5IP6: - bytesToDiscard = net.IPv6len - case proxy_socks5Domain: - _, err := io.ReadFull(conn, buf[:1]) - if err != nil { - return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - bytesToDiscard = int(buf[0]) - default: - return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) - } - - if cap(buf) < bytesToDiscard { - buf = make([]byte, bytesToDiscard) - } else { - buf = buf[:bytesToDiscard] - } - if _, err := io.ReadFull(conn, buf); err != nil { - return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - // Also need to discard the port number - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - return nil -} From 233a68d0c66fd5d5d875b8d46134c918be403896 Mon Sep 17 00:00:00 2001 From: CarlHembrough Date: Wed, 30 Sep 2020 12:17:46 +0100 Subject: [PATCH 08/15] Upgrade dp-graph to v2.2.0 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9cb79ff9..9257f69b 100644 --- a/go.mod +++ b/go.mod @@ 
-5,7 +5,7 @@ go 1.13 require ( github.com/ONSdigital/dp-api-clients-go v1.28.0 github.com/ONSdigital/dp-authorisation v0.1.0 - github.com/ONSdigital/dp-graph/v2 v2.1.3 + github.com/ONSdigital/dp-graph/v2 v2.2.0 github.com/ONSdigital/dp-healthcheck v1.0.5 github.com/ONSdigital/dp-kafka v1.1.7 github.com/ONSdigital/dp-mongodb v1.4.0 diff --git a/go.sum b/go.sum index aac7808b..2539a5a5 100644 --- a/go.sum +++ b/go.sum @@ -5,8 +5,8 @@ github.com/ONSdigital/dp-api-clients-go v1.28.0/go.mod h1:iyJy6uRL4B6OYOJA0XMr5U github.com/ONSdigital/dp-authorisation v0.1.0 h1:HzYwJdvk7ZAeB56KMAH6MP5+5uZuuJnEyGq6CViDoCg= github.com/ONSdigital/dp-authorisation v0.1.0/go.mod h1:rT81tcvWto5/cUWUFd0Q6gTqBoRfQmD6Qp0sq7FyiMg= github.com/ONSdigital/dp-frontend-models v1.1.0/go.mod h1:TT96P7Mi69N3Tc/jFNdbjiwG4GAaMjP26HLotFQ6BPw= -github.com/ONSdigital/dp-graph/v2 v2.1.3 h1:7BJsRE9FDf7tFtJvI5v7RX94q4BAbiJFDWpxQCNfM18= -github.com/ONSdigital/dp-graph/v2 v2.1.3/go.mod h1:6C59rOY0qBKblczkQrJZZKa8ZLR3yKkyunSCeIUavtU= +github.com/ONSdigital/dp-graph/v2 v2.2.0 h1:duocjKyN3623/A/gvata/6PWjcD4bnMWgBxlwiN46sc= +github.com/ONSdigital/dp-graph/v2 v2.2.0/go.mod h1:6C59rOY0qBKblczkQrJZZKa8ZLR3yKkyunSCeIUavtU= github.com/ONSdigital/dp-healthcheck v0.0.0-20200131122546-9db6d3f0494e/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= github.com/ONSdigital/dp-healthcheck v1.0.0/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= github.com/ONSdigital/dp-healthcheck v1.0.2/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= From 4345d157d7272b418e7ecbe24bbb99968108a8be Mon Sep 17 00:00:00 2001 From: CarlHembrough Date: Thu, 1 Oct 2020 07:05:23 +0100 Subject: [PATCH 09/15] Upgrade dp-graph to v2.2.0 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9cb79ff9..9257f69b 100644 --- a/go.mod +++ 
b/go.mod @@ -5,7 +5,7 @@ go 1.13 require ( github.com/ONSdigital/dp-api-clients-go v1.28.0 github.com/ONSdigital/dp-authorisation v0.1.0 - github.com/ONSdigital/dp-graph/v2 v2.1.3 + github.com/ONSdigital/dp-graph/v2 v2.2.0 github.com/ONSdigital/dp-healthcheck v1.0.5 github.com/ONSdigital/dp-kafka v1.1.7 github.com/ONSdigital/dp-mongodb v1.4.0 diff --git a/go.sum b/go.sum index d56a4063..ed1c4384 100644 --- a/go.sum +++ b/go.sum @@ -5,8 +5,8 @@ github.com/ONSdigital/dp-api-clients-go v1.28.0/go.mod h1:iyJy6uRL4B6OYOJA0XMr5U github.com/ONSdigital/dp-authorisation v0.1.0 h1:HzYwJdvk7ZAeB56KMAH6MP5+5uZuuJnEyGq6CViDoCg= github.com/ONSdigital/dp-authorisation v0.1.0/go.mod h1:rT81tcvWto5/cUWUFd0Q6gTqBoRfQmD6Qp0sq7FyiMg= github.com/ONSdigital/dp-frontend-models v1.1.0/go.mod h1:TT96P7Mi69N3Tc/jFNdbjiwG4GAaMjP26HLotFQ6BPw= -github.com/ONSdigital/dp-graph/v2 v2.1.3 h1:7BJsRE9FDf7tFtJvI5v7RX94q4BAbiJFDWpxQCNfM18= -github.com/ONSdigital/dp-graph/v2 v2.1.3/go.mod h1:6C59rOY0qBKblczkQrJZZKa8ZLR3yKkyunSCeIUavtU= +github.com/ONSdigital/dp-graph/v2 v2.2.0 h1:duocjKyN3623/A/gvata/6PWjcD4bnMWgBxlwiN46sc= +github.com/ONSdigital/dp-graph/v2 v2.2.0/go.mod h1:6C59rOY0qBKblczkQrJZZKa8ZLR3yKkyunSCeIUavtU= github.com/ONSdigital/dp-healthcheck v0.0.0-20200131122546-9db6d3f0494e/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= github.com/ONSdigital/dp-healthcheck v1.0.0/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= github.com/ONSdigital/dp-healthcheck v1.0.2 h1:N8SzpYzdixVgJS9NMzTBA2RZ2bi3Am1wE5F8ROEpTYw= From b6aae5e06bbcb44e93e1fce1a600a51cb482be83 Mon Sep 17 00:00:00 2001 From: CarlHembrough Date: Tue, 6 Oct 2020 10:06:49 +0100 Subject: [PATCH 10/15] Default to use Neo4j in Makefile --- Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index e1deec6f..507444dd 100644 --- a/Makefile 
+++ b/Makefile @@ -8,9 +8,8 @@ GIT_COMMIT=$(shell git rev-parse HEAD) VERSION ?= $(shell git tag --points-at HEAD | grep ^v | head -n 1) LDFLAGS=-ldflags "-X main.BuildTime=$(BUILD_TIME) -X main.GitCommit=$(GIT_COMMIT) -X main.Version=$(VERSION)" -export GRAPH_DRIVER_TYPE?=neptune -export GRAPH_ADDR?=ws://localhost:8182/gremlin - +export GRAPH_DRIVER_TYPE?=neo4j +export GRAPH_ADDR?=bolt://localhost:7687 export ENABLE_PRIVATE_ENDPOINTS?=true .PHONY: all From 8de078ae8a0d530b983a72fe0e99f8f0b72419aa Mon Sep 17 00:00:00 2001 From: CarlHembrough Date: Tue, 13 Oct 2020 13:45:01 +0100 Subject: [PATCH 11/15] Upgrade ONSdigital dependencies --- go.mod | 6 +++--- go.sum | 11 ++++++----- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 9257f69b..8c280b1b 100644 --- a/go.mod +++ b/go.mod @@ -3,13 +3,13 @@ module github.com/ONSdigital/dp-dataset-api go 1.13 require ( - github.com/ONSdigital/dp-api-clients-go v1.28.0 + github.com/ONSdigital/dp-api-clients-go v1.30.0 github.com/ONSdigital/dp-authorisation v0.1.0 - github.com/ONSdigital/dp-graph/v2 v2.2.0 + github.com/ONSdigital/dp-graph/v2 v2.2.2 github.com/ONSdigital/dp-healthcheck v1.0.5 github.com/ONSdigital/dp-kafka v1.1.7 github.com/ONSdigital/dp-mongodb v1.4.0 - github.com/ONSdigital/dp-net v1.0.8 + github.com/ONSdigital/dp-net v1.0.9 github.com/ONSdigital/go-ns v0.0.0-20200205115900-a11716f93bad github.com/ONSdigital/log.go v1.0.1 github.com/frankban/quicktest v1.9.0 // indirect diff --git a/go.sum b/go.sum index 2539a5a5..c907f1a0 100644 --- a/go.sum +++ b/go.sum @@ -2,14 +2,15 @@ github.com/ONSdigital/dp-api-clients-go v1.1.0/go.mod h1:9lqor0I7caCnRWr04gU/r7x github.com/ONSdigital/dp-api-clients-go v1.9.0/go.mod h1:SM0b/NXDWndJ9EulmAGdfDY4DxPxK+pNsP8eZlIWiqM= github.com/ONSdigital/dp-api-clients-go v1.28.0 
h1:ExIUlHC6uBdBlFwt/gAI0ApSzpyigy0NWJFK3XCwSVc= github.com/ONSdigital/dp-api-clients-go v1.28.0/go.mod h1:iyJy6uRL4B6OYOJA0XMr5UHt6+Q8XmN9uwmURO+9Oj4= +github.com/ONSdigital/dp-api-clients-go v1.30.0 h1:TA3LHTccG4GHlUqDHGSrJRZEq15Wd1q1thE4Yxdv8H8= +github.com/ONSdigital/dp-api-clients-go v1.30.0/go.mod h1:iyJy6uRL4B6OYOJA0XMr5UHt6+Q8XmN9uwmURO+9Oj4= github.com/ONSdigital/dp-authorisation v0.1.0 h1:HzYwJdvk7ZAeB56KMAH6MP5+5uZuuJnEyGq6CViDoCg= github.com/ONSdigital/dp-authorisation v0.1.0/go.mod h1:rT81tcvWto5/cUWUFd0Q6gTqBoRfQmD6Qp0sq7FyiMg= github.com/ONSdigital/dp-frontend-models v1.1.0/go.mod h1:TT96P7Mi69N3Tc/jFNdbjiwG4GAaMjP26HLotFQ6BPw= -github.com/ONSdigital/dp-graph/v2 v2.2.0 h1:duocjKyN3623/A/gvata/6PWjcD4bnMWgBxlwiN46sc= -github.com/ONSdigital/dp-graph/v2 v2.2.0/go.mod h1:6C59rOY0qBKblczkQrJZZKa8ZLR3yKkyunSCeIUavtU= +github.com/ONSdigital/dp-graph/v2 v2.2.2 h1:hqA+BCHdxsDw3KawdiemRmtWBQhAxDHP+DXhhFpk3H8= +github.com/ONSdigital/dp-graph/v2 v2.2.2/go.mod h1:K4LIhFcyxB8g7nUG5I5I8x6QVf89x82dCEFBbE0mmaQ= github.com/ONSdigital/dp-healthcheck v0.0.0-20200131122546-9db6d3f0494e/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= github.com/ONSdigital/dp-healthcheck v1.0.0/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= -github.com/ONSdigital/dp-healthcheck v1.0.2/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= github.com/ONSdigital/dp-healthcheck v1.0.5 h1:DXnohGIqXaLLeYGdaGOhgkZjAbWMNoLAjQ3EgZeMT3M= github.com/ONSdigital/dp-healthcheck v1.0.5/go.mod h1:2wbVAUHMl9+4tWhUlxYUuA1dnf2+NrwzC+So5f5BMLk= github.com/ONSdigital/dp-kafka v1.1.7 h1:/XjDYHZDxA0r4JR5Ua/99z8ocVN6OUU/VyPTeWh95Qc= @@ -22,8 +23,8 @@ github.com/ONSdigital/dp-net v1.0.5-0.20200805082802-e518bc287596/go.mod h1:wDVh github.com/ONSdigital/dp-net v1.0.5-0.20200805145012-9227a11caddb/go.mod h1:MrSZwDUvp8u1VJEqa+36Gwq4E7/DdceW+BDCvGes6Cs= 
github.com/ONSdigital/dp-net v1.0.5-0.20200805150805-cac050646ab5/go.mod h1:de3LB9tedE0tObBwa12dUOt5rvTW4qQkF5rXtt4b6CE= github.com/ONSdigital/dp-net v1.0.7/go.mod h1:1QFzx32FwPKD2lgZI6MtcsUXritsBdJihlzIWDrQ/gc= -github.com/ONSdigital/dp-net v1.0.8 h1:fcOw+PBZWgp14ryz61daMvkqFKIK7lp7AceRbaBLa24= -github.com/ONSdigital/dp-net v1.0.8/go.mod h1:2lvIKOlD4T3BjWQwjHhBUO2UNWDk82u/+mHRn0R3C9A= +github.com/ONSdigital/dp-net v1.0.9 h1:jjQzUkCPNEvqdOWTuK+F65HE46dH72Owtfcv9yKq8kc= +github.com/ONSdigital/dp-net v1.0.9/go.mod h1:2lvIKOlD4T3BjWQwjHhBUO2UNWDk82u/+mHRn0R3C9A= github.com/ONSdigital/dp-rchttp v0.0.0-20190919143000-bb5699e6fd59/go.mod h1:KkW68U3FPuivW4ogi9L8CPKNj9ZxGko4qcUY7KoAAkQ= github.com/ONSdigital/dp-rchttp v0.0.0-20200114090501-463a529590e8/go.mod h1:821jZtK0oBsV8hjIkNr8vhAWuv0FxJBPJuAHa2B70Gk= github.com/ONSdigital/dp-rchttp v1.0.0 h1:K/1/gDtfMZCX1Mbmq80nZxzDirzneqA1c89ea26FqP4= From 7c6dc6aa26068b6e194c80ff19e5fbfd8c90f605 Mon Sep 17 00:00:00 2001 From: CarlHembrough Date: Tue, 13 Oct 2020 13:48:07 +0100 Subject: [PATCH 12/15] Add missing phony target to Makefile --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index 507444dd..296297c7 100644 --- a/Makefile +++ b/Makefile @@ -27,6 +27,8 @@ build: .PHONY: debug debug: HUMAN_LOG=1 go run -race $(LDFLAGS) main.go + +.PHONY: acceptance-publishing acceptance-publishing: build ENABLE_PRIVATE_ENDPOINTS=true MONGODB_DATABASE=test HUMAN_LOG=1 go run -race $(LDFLAGS) main.go From 4cc4ffb3b01852ef56ebd72e955609a5baa4ec72 Mon Sep 17 00:00:00 2001 From: Andre Urbani Date: Thu, 15 Oct 2020 09:32:41 +0100 Subject: [PATCH 13/15] added URI whitespace trimming function and related unit tests --- go.sum | 1 + models/dataset.go | 23 +++++++++++++++ models/dataset_test.go | 63 ++++++++++++++++++++++++++++++++++++++++++ mongo/dataset_store.go | 5 ++++ mongo/dataset_test.go | 4 +-- 5 files changed, 94 insertions(+), 2 
deletions(-) diff --git a/go.sum b/go.sum index c95b8ecf..42d7c635 100644 --- a/go.sum +++ b/go.sum @@ -160,6 +160,7 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86JNJe4spst6Ff8MjvPUdPg= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/models/dataset.go b/models/dataset.go index f8bd1e5c..76091cd4 100644 --- a/models/dataset.go +++ b/models/dataset.go @@ -6,7 +6,9 @@ import ( "fmt" "io" "io/ioutil" + "net/url" "strconv" + "strings" "time" errs "github.com/ONSdigital/dp-dataset-api/apierrors" @@ -398,6 +400,27 @@ func (ed *EditionUpdate) PublishLinks(ctx context.Context, host string, versionL return nil } +func ValidateDataset(ctx context.Context, dataset *Dataset) error { + var invalidFields []string + if dataset.URI != "" { + dataset.URI = strings.TrimSpace(dataset.URI) + datasetURI, err := url.Parse(dataset.URI) + if err != nil { + invalidFields = append(invalidFields, "URI") + log.Event(ctx, "error parsing URI", log.ERROR, log.Error(err)) + } else { + if !strings.Contains(datasetURI.Path, "/datasets") || !strings.Contains(datasetURI.Path, dataset.ID) { + fmt.Println("hello") + invalidFields = append(invalidFields, "URI") + } + } + } + if invalidFields != nil { + return fmt.Errorf("invalid fields: %v", invalidFields) + } + return nil +} + // ValidateVersion checks the content of the version structure 
func ValidateVersion(version *Version) error { diff --git a/models/dataset_test.go b/models/dataset_test.go index a9350d19..f5b32d5c 100644 --- a/models/dataset_test.go +++ b/models/dataset_test.go @@ -13,6 +13,13 @@ import ( . "github.com/smartystreets/goconvey/convey" ) +func createDataset() Dataset { + return Dataset{ + ID: "123", + URI: "http://localhost:22000/datasets/123", + } +} + var testContext = context.Background() func TestCreateDataset(t *testing.T) { @@ -135,6 +142,62 @@ func TestCreateVersion(t *testing.T) { }) } +func TestValidateDataset(t *testing.T) { + t.Parallel() + + Convey("Unsuccessful validation (false) returned", t, func() { + + Convey("when dataset.URI is unable to be parsed into url format", func() { + dataset := createDataset() + dataset.URI = ":foo" + fmt.Println(dataset.URI) + validationErr := ValidateDataset(testContext, &dataset) + So(validationErr, ShouldNotBeNil) + So(validationErr.Error(), ShouldResemble, errors.New("invalid fields: [URI]").Error()) + }) + + Convey("when dataset.URI does not contain 'datasets' path", func() { + dataset := createDataset() + dataset.URI = "/123" + valid := ValidateDataset(testContext, &dataset) + So(valid, ShouldNotBeNil) + So(valid.Error(), ShouldResemble, errors.New("invalid fields: [URI]").Error()) + }) + + Convey("when dataset.URI does not contain 'id' path", func() { + dataset := createDataset() + dataset.URI = "http://localhost:22000/datasets" + valid := ValidateDataset(testContext, &dataset) + So(valid, ShouldNotBeNil) + So(valid.Error(), ShouldResemble, errors.New("invalid fields: [URI]").Error()) + }) + + }) + + Convey("Successful validation (true) returned", t, func() { + + Convey("when dataset.URI contains its path in appropriate url format ", func() { + dataset := createDataset() + dataset.ID = "123" + dataset.URI = "http://localhost:22000/datasets/123/breadcrumbs" + valid := ValidateDataset(testContext, &dataset) + So(valid, ShouldBeNil) + }) + }) + + Convey("Successful 
validation (true) returned", t, func() { + + Convey("when dataset.URI contains whitespace it should not return an error ", func() { + dataset := createDataset() + dataset.ID = "123" + dataset.URI = " http://localhost:22000/datasets/123/breadcrumbs " + valid := ValidateDataset(testContext, &dataset) + So(valid, ShouldBeNil) + }) + }) + +} + func TestValidateVersion(t *testing.T) { t.Parallel() Convey("Successfully return without any errors", t, func() { diff --git a/mongo/dataset_store.go b/mongo/dataset_store.go index 469cc671..46cd97e4 100644 --- a/mongo/dataset_store.go +++ b/mongo/dataset_store.go @@ -417,6 +417,11 @@ func createDatasetUpdateQuery(ctx context.Context, id string, dataset *models.Da updates["next.uri"] = dataset.URI } + if err := models.ValidateDataset(ctx, dataset); err != nil { + log.Event(ctx, "failed validation check to create dataset", log.ERROR, log.Error(err)) + return nil + } + log.Event(ctx, "built update query for dataset resource", log.INFO, log.Data{"dataset_id": id, "dataset": dataset, "updates": updates}) return updates diff --git a/mongo/dataset_test.go b/mongo/dataset_test.go index 17d5cd20..8338787f 100644 --- a/mongo/dataset_test.go +++ b/mongo/dataset_test.go @@ -197,7 +197,7 @@ func TestDatasetUpdateQuery(t *testing.T) { "next.release_frequency": "yearly", "next.theme": "construction", "next.title": "CPI", - "next.uri": "http://ons.gov.uk/dataset/123/landing-page", + "next.uri": "http://ons.gov.uk/datasets/123/landing-page", } dataset := &models.Dataset{ @@ -225,7 +225,7 @@ func TestDatasetUpdateQuery(t *testing.T) { ReleaseFrequency: "yearly", Theme: "construction", Title: "CPI", - URI: "http://ons.gov.uk/dataset/123/landing-page", + URI: "http://ons.gov.uk/datasets/123/landing-page", } selector := createDatasetUpdateQuery(testContext, "123", dataset, models.CreatedState) From 9235fc1a68546f79b4bb216c6cdc330d78312a78 Mon Sep 17 00:00:00 2001 From: Andre Urbani Date: Thu, 15 Oct 2020 11:31:21 +0100 Subject: [PATCH 14/15] made 
requested changes --- models/dataset_test.go | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/models/dataset_test.go b/models/dataset_test.go index f5b32d5c..1430322f 100644 --- a/models/dataset_test.go +++ b/models/dataset_test.go @@ -159,17 +159,17 @@ func TestValidateDataset(t *testing.T) { Convey("when dataset.URI does not contain 'datasets' path", func() { dataset := createDataset() dataset.URI = "/123" - valid := ValidateDataset(testContext, &dataset) - So(valid, ShouldNotBeNil) - So(valid.Error(), ShouldResemble, errors.New("invalid fields: [URI]").Error()) + validationErr := ValidateDataset(testContext, &dataset) + So(validationErr, ShouldNotBeNil) + So(validationErr.Error(), ShouldResemble, errors.New("invalid fields: [URI]").Error()) }) Convey("when dataset.URI does not contain 'id' path", func() { dataset := createDataset() dataset.URI = "http://localhost:22000/datasets" - valid := ValidateDataset(testContext, &dataset) - So(valid, ShouldNotBeNil) - So(valid.Error(), ShouldResemble, errors.New("invalid fields: [URI]").Error()) + validationErr := ValidateDataset(testContext, &dataset) + So(validationErr, ShouldNotBeNil) + So(validationErr.Error(), ShouldResemble, errors.New("invalid fields: [URI]").Error()) }) }) @@ -180,8 +180,8 @@ func TestValidateDataset(t *testing.T) { dataset := createDataset() dataset.ID = "123" dataset.URI = "http://localhost:22000/datasets/123/breadcrumbs" - valid := ValidateDataset(testContext, &dataset) - So(valid, ShouldBeNil) + validationErr := ValidateDataset(testContext, &dataset) + So(validationErr, ShouldBeNil) }) }) @@ -191,8 +191,9 @@ func TestValidateDataset(t *testing.T) { dataset := createDataset() dataset.ID = "123" dataset.URI = " http://localhost:22000/datasets/123/breadcrumbs " - valid := ValidateDataset(testContext, &dataset) - So(valid, ShouldBeNil) + validationErr := ValidateDataset(testContext, &dataset) + So(validationErr, ShouldBeNil) + So(dataset.URI, ShouldEqual, 
"http://localhost:22000/datasets/123/breadcrumbs") }) }) From 2771c109488d50cec1502d122dde133bcad19710 Mon Sep 17 00:00:00 2001 From: CarlHembrough Date: Thu, 22 Oct 2020 08:36:58 +0100 Subject: [PATCH 15/15] Ensure graph db errors are consumed and logged --- go.mod | 2 +- go.sum | 4 +- service/initialise.go | 19 +++++++--- service/interfaces.go | 8 +++- service/mock/closer.go | 73 +++++++++++++++++++++++++++++++++++++ service/mock/initialiser.go | 8 ++-- service/service.go | 13 ++++++- service/service_test.go | 20 +++++++--- 8 files changed, 127 insertions(+), 20 deletions(-) create mode 100644 service/mock/closer.go diff --git a/go.mod b/go.mod index 8c280b1b..1eded4fa 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.13 require ( github.com/ONSdigital/dp-api-clients-go v1.30.0 github.com/ONSdigital/dp-authorisation v0.1.0 - github.com/ONSdigital/dp-graph/v2 v2.2.2 + github.com/ONSdigital/dp-graph/v2 v2.3.0 github.com/ONSdigital/dp-healthcheck v1.0.5 github.com/ONSdigital/dp-kafka v1.1.7 github.com/ONSdigital/dp-mongodb v1.4.0 diff --git a/go.sum b/go.sum index d8e717bc..6cb9dea6 100644 --- a/go.sum +++ b/go.sum @@ -7,8 +7,8 @@ github.com/ONSdigital/dp-api-clients-go v1.30.0/go.mod h1:iyJy6uRL4B6OYOJA0XMr5U github.com/ONSdigital/dp-authorisation v0.1.0 h1:HzYwJdvk7ZAeB56KMAH6MP5+5uZuuJnEyGq6CViDoCg= github.com/ONSdigital/dp-authorisation v0.1.0/go.mod h1:rT81tcvWto5/cUWUFd0Q6gTqBoRfQmD6Qp0sq7FyiMg= github.com/ONSdigital/dp-frontend-models v1.1.0/go.mod h1:TT96P7Mi69N3Tc/jFNdbjiwG4GAaMjP26HLotFQ6BPw= -github.com/ONSdigital/dp-graph/v2 v2.2.2 h1:hqA+BCHdxsDw3KawdiemRmtWBQhAxDHP+DXhhFpk3H8= -github.com/ONSdigital/dp-graph/v2 v2.2.2/go.mod h1:K4LIhFcyxB8g7nUG5I5I8x6QVf89x82dCEFBbE0mmaQ= +github.com/ONSdigital/dp-graph/v2 v2.3.0 h1:xK9qImVbh86l04aAUeurjB7d8mwn27eacP+5gpvPLO8= +github.com/ONSdigital/dp-graph/v2 v2.3.0/go.mod 
h1:K4LIhFcyxB8g7nUG5I5I8x6QVf89x82dCEFBbE0mmaQ= github.com/ONSdigital/dp-healthcheck v0.0.0-20200131122546-9db6d3f0494e/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= github.com/ONSdigital/dp-healthcheck v1.0.0/go.mod h1:zighxZ/0m5u7zo0eAr8XFlA+Dz2ic7A1vna6YXvhCjQ= github.com/ONSdigital/dp-healthcheck v1.0.5 h1:DXnohGIqXaLLeYGdaGOhgkZjAbWMNoLAjQ3EgZeMT3M= diff --git a/service/initialise.go b/service/initialise.go index 02eea363..968a2c4c 100644 --- a/service/initialise.go +++ b/service/initialise.go @@ -60,13 +60,13 @@ func (e *ExternalServiceList) GetProducer(ctx context.Context, cfg *config.Confi } // GetGraphDB returns a graphDB (only if observation and private endpoint are enabled) -func (e *ExternalServiceList) GetGraphDB(ctx context.Context) (store.GraphDB, error) { - graphDB, err := e.Init.DoGetGraphDB(ctx) +func (e *ExternalServiceList) GetGraphDB(ctx context.Context) (store.GraphDB, Closer, error) { + graphDB, graphDBErrorConsumer, err := e.Init.DoGetGraphDB(ctx) if err != nil { - return nil, err + return nil, nil, err } e.Graph = true - return graphDB, nil + return graphDB, graphDBErrorConsumer, nil } // GetMongoDB returns a mongodb health client and dataset mongo object @@ -104,8 +104,15 @@ func (e *Init) DoGetKafkaProducer(ctx context.Context, cfg *config.Configuration } // DoGetGraphDB creates a new GraphDB -func (e *Init) DoGetGraphDB(ctx context.Context) (store.GraphDB, error) { - return graph.New(ctx, graph.Subsets{Observation: true, Instance: true}) +func (e *Init) DoGetGraphDB(ctx context.Context) (store.GraphDB, Closer, error) { + graphDB, err := graph.New(ctx, graph.Subsets{Observation: true, Instance: true}) + if err != nil { + return nil, nil, err + } + + graphDBErrorConsumer := graph.NewLoggingErrorConsumer(ctx, graphDB.ErrorChan()) + + return graphDB, graphDBErrorConsumer, nil } // DoGetMongoDB returns a MongoDB diff --git a/service/interfaces.go b/service/interfaces.go index 2f083117..4b26657c 100644 --- 
a/service/interfaces.go +++ b/service/interfaces.go @@ -13,13 +13,14 @@ import ( //go:generate moq -out mock/initialiser.go -pkg mock . Initialiser //go:generate moq -out mock/server.go -pkg mock . HTTPServer //go:generate moq -out mock/healthcheck.go -pkg mock . HealthChecker +//go:generate moq -out mock/closer.go -pkg mock . Closer // Initialiser defines the methods to initialise external services type Initialiser interface { DoGetHTTPServer(bindAddr string, router http.Handler) HTTPServer DoGetHealthCheck(cfg *config.Configuration, buildTime, gitCommit, version string) (HealthChecker, error) DoGetKafkaProducer(ctx context.Context, cfg *config.Configuration) (kafka.IProducer, error) - DoGetGraphDB(ctx context.Context) (store.GraphDB, error) + DoGetGraphDB(ctx context.Context) (store.GraphDB, Closer, error) DoGetMongoDB(ctx context.Context, cfg *config.Configuration) (store.MongoDB, error) } @@ -36,3 +37,8 @@ type HealthChecker interface { Stop() AddCheck(name string, checker healthcheck.Checker) (err error) } + +// Closer defines the required methods for a closable resource +type Closer interface { + Close(ctx context.Context) error +} diff --git a/service/mock/closer.go b/service/mock/closer.go new file mode 100644 index 00000000..5d4d2c94 --- /dev/null +++ b/service/mock/closer.go @@ -0,0 +1,73 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package mock + +import ( + "context" + "sync" +) + +var ( + lockCloserMockClose sync.RWMutex +) + +// CloserMock is a mock implementation of service.Closer. +// +// func TestSomethingThatUsesCloser(t *testing.T) { +// +// // make and configure a mocked service.Closer +// mockedCloser := &CloserMock{ +// CloseFunc: func(ctx context.Context) error { +// panic("mock out the Close method") +// }, +// } +// +// // use mockedCloser in code that requires service.Closer +// // and then make assertions. +// +// } +type CloserMock struct { + // CloseFunc mocks the Close method. 
+ CloseFunc func(ctx context.Context) error + + // calls tracks calls to the methods. + calls struct { + // Close holds details about calls to the Close method. + Close []struct { + // Ctx is the ctx argument value. + Ctx context.Context + } + } +} + +// Close calls CloseFunc. +func (mock *CloserMock) Close(ctx context.Context) error { + if mock.CloseFunc == nil { + panic("CloserMock.CloseFunc: method is nil but Closer.Close was just called") + } + callInfo := struct { + Ctx context.Context + }{ + Ctx: ctx, + } + lockCloserMockClose.Lock() + mock.calls.Close = append(mock.calls.Close, callInfo) + lockCloserMockClose.Unlock() + return mock.CloseFunc(ctx) +} + +// CloseCalls gets all the calls that were made to Close. +// Check the length with: +// len(mockedCloser.CloseCalls()) +func (mock *CloserMock) CloseCalls() []struct { + Ctx context.Context +} { + var calls []struct { + Ctx context.Context + } + lockCloserMockClose.RLock() + calls = mock.calls.Close + lockCloserMockClose.RUnlock() + return calls +} diff --git a/service/mock/initialiser.go b/service/mock/initialiser.go index 42396962..a049b5a4 100644 --- a/service/mock/initialiser.go +++ b/service/mock/initialiser.go @@ -21,7 +21,7 @@ var ( lockInitialiserMockDoGetMongoDB sync.RWMutex ) -// Ensure, that InitialiserMock does implement service.Initialiser. +// Ensure, that InitialiserMock does implement Initialiser. // If this is not the case, regenerate this file with moq. 
var _ service.Initialiser = &InitialiserMock{} @@ -31,7 +31,7 @@ var _ service.Initialiser = &InitialiserMock{} // // // make and configure a mocked service.Initialiser // mockedInitialiser := &InitialiserMock{ -// DoGetGraphDBFunc: func(ctx context.Context) (store.GraphDB, error) { +// DoGetGraphDBFunc: func(ctx context.Context) (store.GraphDB, service.Closer, error) { // panic("mock out the DoGetGraphDB method") // }, // DoGetHTTPServerFunc: func(bindAddr string, router http.Handler) service.HTTPServer { @@ -54,7 +54,7 @@ var _ service.Initialiser = &InitialiserMock{} // } type InitialiserMock struct { // DoGetGraphDBFunc mocks the DoGetGraphDB method. - DoGetGraphDBFunc func(ctx context.Context) (store.GraphDB, error) + DoGetGraphDBFunc func(ctx context.Context) (store.GraphDB, service.Closer, error) // DoGetHTTPServerFunc mocks the DoGetHTTPServer method. DoGetHTTPServerFunc func(bindAddr string, router http.Handler) service.HTTPServer @@ -111,7 +111,7 @@ type InitialiserMock struct { } // DoGetGraphDB calls DoGetGraphDBFunc. 
-func (mock *InitialiserMock) DoGetGraphDB(ctx context.Context) (store.GraphDB, error) { +func (mock *InitialiserMock) DoGetGraphDB(ctx context.Context) (store.GraphDB, service.Closer, error) { if mock.DoGetGraphDBFunc == nil { panic("InitialiserMock.DoGetGraphDBFunc: method is nil but Initialiser.DoGetGraphDB was just called") } diff --git a/service/service.go b/service/service.go index 3b7eacab..ea11c1cf 100644 --- a/service/service.go +++ b/service/service.go @@ -36,6 +36,7 @@ type Service struct { config *config.Configuration serviceList *ExternalServiceList graphDB store.GraphDB + graphDBErrorConsumer Closer mongoDB store.MongoDB generateDownloadsProducer kafka.IProducer identityClient *clientsidentity.Client @@ -78,6 +79,11 @@ func (svc *Service) SetGraphDB(graphDB store.GraphDB) { svc.graphDB = graphDB } +// SetGraphDBErrorConsumer sets the graphDB error consumer for a service +func (svc *Service) SetGraphDBErrorConsumer(graphDBErrorConsumer Closer) { + svc.graphDBErrorConsumer = graphDBErrorConsumer +} + // Run the service func (svc *Service) Run(ctx context.Context, buildTime, gitCommit, version string, svcErrors chan error) (err error) { @@ -95,7 +101,7 @@ func (svc *Service) Run(ctx context.Context, buildTime, gitCommit, version strin "EnablePrivateEndpoints": svc.config.EnablePrivateEndpoints, }) } else { - svc.graphDB, err = svc.serviceList.GetGraphDB(ctx) + svc.graphDB, svc.graphDBErrorConsumer, err = svc.serviceList.GetGraphDB(ctx) if err != nil { log.Event(ctx, "failed to initialise graph driver", log.FATAL, log.Error(err)) return err @@ -260,6 +266,11 @@ func (svc *Service) Close(ctx context.Context) error { log.Event(shutdownContext, "failed to close graph db", log.ERROR, log.Error(err)) hasShutdownError = true } + + if err := svc.graphDBErrorConsumer.Close(shutdownContext); err != nil { + log.Event(shutdownContext, "failed to close graph db error consumer", log.ERROR, log.Error(err)) + hasShutdownError = true + } } }() diff --git 
a/service/service_test.go b/service/service_test.go index b7216cf8..1ae34c0a 100644 --- a/service/service_test.go +++ b/service/service_test.go @@ -47,8 +47,8 @@ var funcDoGetMongoDBErr = func(ctx context.Context, cfg *config.Configuration) ( return nil, errMongo } -var funcDoGetGraphDBErr = func(ctx context.Context) (store.GraphDB, error) { - return nil, errGraph +var funcDoGetGraphDBErr = func(ctx context.Context) (store.GraphDB, service.Closer, error) { + return nil, nil, errGraph } var funcDoGetKafkaProducerErr = func(ctx context.Context, cfg *config.Configuration) (kafka.IProducer, error) { @@ -99,8 +99,11 @@ func TestRun(t *testing.T) { return &storeMock.MongoDBMock{}, nil } - funcDoGetGraphDBOk := func(ctx context.Context) (store.GraphDB, error) { - return &storeMock.GraphDBMock{}, nil + funcDoGetGraphDBOk := func(ctx context.Context) (store.GraphDB, service.Closer, error) { + var funcClose = func(ctx context.Context) error { + return nil + } + return &storeMock.GraphDBMock{}, &serviceMock.CloserMock{CloseFunc: funcClose}, nil } funcDoGetKafkaProducerOk := func(ctx context.Context, cfg *config.Configuration) (kafka.IProducer, error) { @@ -370,6 +373,10 @@ func TestClose(t *testing.T) { CloseFunc: funcClose, } + graphErrorConsumerMock := &serviceMock.CloserMock{ + CloseFunc: funcClose, + } + // Kafka producer will fail if healthcheck or http server are not stopped kafkaProducerMock := &kafkatest.IProducerMock{ ChannelsFunc: func() *kafka.ProducerChannels { @@ -405,12 +412,14 @@ func TestClose(t *testing.T) { svc.SetDownloadsProducer(kafkaProducerMock) svc.SetMongoDB(mongoMock) svc.SetGraphDB(graphMock) + svc.SetGraphDBErrorConsumer(graphErrorConsumerMock) err = svc.Close(context.Background()) So(err, ShouldBeNil) So(len(hcMock.StopCalls()), ShouldEqual, 1) So(len(serverMock.ShutdownCalls()), ShouldEqual, 1) So(len(mongoMock.CloseCalls()), ShouldEqual, 1) So(len(graphMock.CloseCalls()), ShouldEqual, 1) + So(len(graphErrorConsumerMock.CloseCalls()), 
ShouldEqual, 1) So(len(kafkaProducerMock.CloseCalls()), ShouldEqual, 1) }) @@ -428,6 +437,7 @@ func TestClose(t *testing.T) { svc.SetDownloadsProducer(kafkaProducerMock) svc.SetMongoDB(mongoMock) svc.SetGraphDB(graphMock) + svc.SetGraphDBErrorConsumer(graphErrorConsumerMock) err = svc.Close(context.Background()) So(err, ShouldNotBeNil) So(err.Error(), ShouldResemble, "failed to shutdown gracefully") @@ -435,8 +445,8 @@ func TestClose(t *testing.T) { So(len(failingserverMock.ShutdownCalls()), ShouldEqual, 1) So(len(mongoMock.CloseCalls()), ShouldEqual, 1) So(len(graphMock.CloseCalls()), ShouldEqual, 1) + So(len(graphErrorConsumerMock.CloseCalls()), ShouldEqual, 1) So(len(kafkaProducerMock.CloseCalls()), ShouldEqual, 1) - }) }) }