diff --git a/.travis.yml b/.travis.yml index afa9ab503f21..3ae88aab6df1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -30,8 +30,6 @@ matrix: go: 1.10.x script: - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703 - - brew update - - brew cask install osxfuse - go run build/ci.go install - go run build/ci.go test -coverage $TEST_PACKAGES diff --git a/AUTHORS b/AUTHORS index bd44a3de553c..609dc48c726b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -171,3 +171,4 @@ xiekeyang yoza ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com> Максим Чусовлянов +Ralph Caraveo diff --git a/Makefile b/Makefile index 5cb9231a1871..0c1bb3bce70c 100644 --- a/Makefile +++ b/Makefile @@ -41,6 +41,7 @@ lint: ## Run linters. build/env.sh go run build/ci.go lint clean: + ./build/clean_go_build_cache.sh rm -fr build/_workspace/pkg/ $(GOBIN)/* # The devtools target installs tools required for 'go generate'. diff --git a/README.md b/README.md index b2e992a4ef4f..c6bc91af174b 100644 --- a/README.md +++ b/README.md @@ -34,13 +34,13 @@ The go-ethereum project comes with several wrappers/executables found in the `cm | Command | Description | |:----------:|-------------| -| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. | +| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. | | `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. | | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). 
| | `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. | | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | -| `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. | +| `swarm` | Swarm daemon and tools. This is the entrypoint for the Swarm network. `swarm --help` for command line options and subcommands. See [Swarm README](https://github.com/ethereum/go-ethereum/tree/master/swarm) for more information. | | `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | ## Running geth @@ -69,7 +69,7 @@ This command will: * Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console), (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API) as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs). - This too is optional and if you leave it out you can always attach to an already running Geth instance + This tool is optional and if you leave it out you can always attach to an already running Geth instance with `geth attach`. ### Full node on the Ethereum test network diff --git a/VERSION b/VERSION deleted file mode 100644 index 7d2424c90b0f..000000000000 --- a/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.8.12 diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index fd69538d535e..1d14f8c6f117 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -65,9 +65,9 @@ type SimulatedBackend struct { // NewSimulatedBackend creates a new binding backend using a simulated blockchain // for testing purposes. -func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend { +func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { database := ethdb.NewMemDatabase() - genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc} + genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc} genesis.MustCommit(database) blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}) @@ -324,18 +324,24 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa // // TODO(karalabe): Deprecate when the subscription one can return past data too. 
func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) { - // Initialize unset filter boundaried to run from genesis to chain head - from := int64(0) - if query.FromBlock != nil { - from = query.FromBlock.Int64() - } - to := int64(-1) - if query.ToBlock != nil { - to = query.ToBlock.Int64() + var filter *filters.Filter + if query.BlockHash != nil { + // Block filter requested, construct a single-shot filter + filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics) + } else { + // Initialize unset filter boundaried to run from genesis to chain head + from := int64(0) + if query.FromBlock != nil { + from = query.FromBlock.Int64() + } + to := int64(-1) + if query.ToBlock != nil { + to = query.ToBlock.Int64() + } + // Construct the range filter + filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics) } - // Construct and execute the filter - filter := filters.New(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics) - + // Run the filter and return all the logs logs, err := filter.Logs(ctx) if err != nil { return nil, err @@ -430,6 +436,10 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumb return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil } +func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + return fb.bc.GetHeaderByHash(hash), nil +} + func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { number := rawdb.ReadHeaderNumber(fb.db, hash) if number == nil { diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index 2a5a88648771..0e5b1c1616f1 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -229,7 +229,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy an interaction tester contract and call a transaction on it _, _, interactor, err := DeployInteractor(auth, sim, "Deploy string") @@ -270,7 +270,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a tuple tester contract and execute a structured call on it _, _, getter, err := DeployGetter(auth, sim) @@ -302,7 +302,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a tuple tester contract and execute a structured call on it _, _, tupler, err := DeployTupler(auth, sim) @@ -344,7 +344,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator 
key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a slice tester contract and execute a n array call on it _, _, slicer, err := DeploySlicer(auth, sim) @@ -378,7 +378,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a default method invoker contract and execute its default method _, _, defaulter, err := DeployDefaulter(auth, sim) @@ -411,7 +411,7 @@ var bindTests = []struct { `[{"constant":true,"inputs":[],"name":"String","outputs":[{"name":"","type":"string"}],"type":"function"}]`, ` // Create a simulator and wrap a non-deployed contract - sim := backends.NewSimulatedBackend(nil) + sim := backends.NewSimulatedBackend(nil, uint64(10000000000)) nonexistent, err := NewNonExistent(common.Address{}, sim) if err != nil { @@ -447,7 +447,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a funky gas pattern contract _, _, limiter, err := DeployFunkyGasPattern(auth, sim) @@ -482,7 +482,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a sender tester contract and execute a structured call on it _, _, callfrom, err := DeployCallFrom(auth, sim) @@ -542,7 +542,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a underscorer tester contract and execute a structured call on it _, _, underscorer, err := DeployUnderscorer(auth, sim) @@ -612,7 +612,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy an eventer contract _, _, eventer, err := DeployEventer(auth, sim) @@ -761,7 +761,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: 
big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) //deploy the test contract _, _, testContract, err := DeployDeeplyNestedArray(auth, sim) @@ -820,7 +820,7 @@ func TestBindings(t *testing.T) { t.Skip("go sdk not found for testing") } // Skip the test if the go-ethereum sources are symlinked (https://github.com/golang/go/issues/14845) - linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil))\n}") + linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil,uint64(10000000000)))\n}") linkTestDeps, err := imports.Process(os.TempDir(), []byte(linkTestCode), nil) if err != nil { t.Fatalf("failed check for goimports symlink bug: %v", err) diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go index 49e6dc813a33..8f4092971f2f 100644 --- a/accounts/abi/bind/util_test.go +++ b/accounts/abi/bind/util_test.go @@ -53,9 +53,11 @@ var waitDeployedTests = map[string]struct { func TestWaitDeployed(t *testing.T) { for name, test := range waitDeployedTests { - backend := backends.NewSimulatedBackend(core.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)}, - }) + backend := backends.NewSimulatedBackend( + core.GenesisAlloc{ + crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)}, + }, 10000000, + ) // Create the transaction. tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code)) diff --git a/accounts/abi/method.go b/accounts/abi/method.go index f434ffdbef6c..5831057652b6 100644 --- a/accounts/abi/method.go +++ b/accounts/abi/method.go @@ -47,10 +47,8 @@ type Method struct { // Please note that "int" is substitute for its canonical representation "int256" func (method Method) Sig() string { types := make([]string, len(method.Inputs)) - i := 0 - for _, input := range method.Inputs { + for i, input := range method.Inputs { types[i] = input.Type.String() - i++ } return fmt.Sprintf("%v(%v)", method.Name, strings.Join(types, ",")) } diff --git a/accounts/accounts.go b/accounts/accounts.go index 76951e1a42a6..cb1eae281587 100644 --- a/accounts/accounts.go +++ b/accounts/accounts.go @@ -106,7 +106,7 @@ type Wallet interface { // or optionally with the aid of any location metadata from the embedded URL field. // // If the wallet requires additional authentication to sign the request (e.g. - // a password to decrypt the account, or a PIN code o verify the transaction), + // a password to decrypt the account, or a PIN code to verify the transaction), // an AuthNeededError instance will be returned, containing infos for the user // about which fields or actions are needed. The user may retry by providing // the needed details via SignTxWithPassphrase, or by other means (e.g. unlock diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go index 71f698ece71a..da3a46eb8090 100644 --- a/accounts/keystore/account_cache.go +++ b/accounts/keystore/account_cache.go @@ -27,10 +27,10 @@ import ( "sync" "time" + mapset "github.com/deckarep/golang-set" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "gopkg.in/fatih/set.v0" ) // Minimum amount of time between cache reloads. 
This limit applies if the platform does @@ -79,7 +79,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) { keydir: keydir, byAddr: make(map[common.Address][]accounts.Account), notify: make(chan struct{}, 1), - fileC: fileCache{all: set.NewNonTS()}, + fileC: fileCache{all: mapset.NewThreadUnsafeSet()}, } ac.watcher = newWatcher(ac) return ac, ac.notify @@ -237,7 +237,7 @@ func (ac *accountCache) scanAccounts() error { log.Debug("Failed to reload keystore contents", "err", err) return err } - if creates.Size() == 0 && deletes.Size() == 0 && updates.Size() == 0 { + if creates.Cardinality() == 0 && deletes.Cardinality() == 0 && updates.Cardinality() == 0 { return nil } // Create a helper method to scan the contents of the key files @@ -272,15 +272,15 @@ func (ac *accountCache) scanAccounts() error { // Process all the file diffs start := time.Now() - for _, p := range creates.List() { + for _, p := range creates.ToSlice() { if a := readAccount(p.(string)); a != nil { ac.add(*a) } } - for _, p := range deletes.List() { + for _, p := range deletes.ToSlice() { ac.deleteByFile(p.(string)) } - for _, p := range updates.List() { + for _, p := range updates.ToSlice() { path := p.(string) ac.deleteByFile(path) if a := readAccount(path); a != nil { diff --git a/accounts/keystore/file_cache.go b/accounts/keystore/file_cache.go index c91b7b7b6104..73ff6ae9ee6f 100644 --- a/accounts/keystore/file_cache.go +++ b/accounts/keystore/file_cache.go @@ -24,20 +24,20 @@ import ( "sync" "time" + mapset "github.com/deckarep/golang-set" "github.com/ethereum/go-ethereum/log" - set "gopkg.in/fatih/set.v0" ) // fileCache is a cache of files seen during scan of keystore. type fileCache struct { - all *set.SetNonTS // Set of all files from the keystore folder - lastMod time.Time // Last time instance when a file was modified + all mapset.Set // Set of all files from the keystore folder + lastMod time.Time // Last time instance when a file was modified mu sync.RWMutex } // scan performs a new scan on the given directory, compares against the already // cached filenames, and returns file sets: creates, deletes, updates. 
-func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) { +func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) { t0 := time.Now() // List all the failes from the keystore folder @@ -51,14 +51,14 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte defer fc.mu.Unlock() // Iterate all the files and gather their metadata - all := set.NewNonTS() - mods := set.NewNonTS() + all := mapset.NewThreadUnsafeSet() + mods := mapset.NewThreadUnsafeSet() var newLastMod time.Time for _, fi := range files { - // Skip any non-key files from the folder path := filepath.Join(keyDir, fi.Name()) - if skipKeyFile(fi) { + // Skip any non-key files from the folder + if nonKeyFile(fi) { log.Trace("Ignoring file on account scan", "path", path) continue } @@ -76,9 +76,9 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte t2 := time.Now() // Update the tracked files and return the three sets - deletes := set.Difference(fc.all, all) // Deletes = previous - current - creates := set.Difference(all, fc.all) // Creates = current - previous - updates := set.Difference(mods, creates) // Updates = modified - creates + deletes := fc.all.Difference(all) // Deletes = previous - current + creates := all.Difference(fc.all) // Creates = current - previous + updates := mods.Difference(creates) // Updates = modified - creates fc.all, fc.lastMod = all, newLastMod t3 := time.Now() @@ -88,8 +88,8 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte return creates, deletes, updates, nil } -// skipKeyFile ignores editor backups, hidden files and folders/symlinks. -func skipKeyFile(fi os.FileInfo) bool { +// nonKeyFile ignores editor backups, hidden files and folders/symlinks. +func nonKeyFile(fi os.FileInfo) bool { // Skip editor backups and UNIX-style hidden files. if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") { return true diff --git a/accounts/keystore/keystore_passphrase.go b/accounts/keystore/keystore_passphrase.go index da632fe3459f..59738abe1546 100644 --- a/accounts/keystore/keystore_passphrase.go +++ b/accounts/keystore/keystore_passphrase.go @@ -28,18 +28,18 @@ package keystore import ( "bytes" "crypto/aes" - crand "crypto/rand" + "crypto/rand" "crypto/sha256" "encoding/hex" "encoding/json" "fmt" + "io" "io/ioutil" "path/filepath" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/crypto/randentropy" "github.com/pborman/uuid" "golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/scrypt" @@ -93,7 +93,7 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) // StoreKey generates a key, encrypts with 'auth' and stores in the given directory func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) { - _, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, crand.Reader, auth) + _, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, rand.Reader, auth) return a.Address, err } @@ -116,7 +116,11 @@ func (ks keyStorePassphrase) JoinPath(filename string) string { // blob that can be decrypted later on. 
func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { authArray := []byte(auth) - salt := randentropy.GetEntropyCSPRNG(32) + + salt := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + panic("reading from crypto/rand failed: " + err.Error()) + } derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen) if err != nil { return nil, err @@ -124,7 +128,10 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { encryptKey := derivedKey[:16] keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32) - iv := randentropy.GetEntropyCSPRNG(aes.BlockSize) // 16 + iv := make([]byte, aes.BlockSize) // 16 + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + panic("reading from crypto/rand failed: " + err.Error()) + } cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv) if err != nil { return nil, err diff --git a/accounts/url.go b/accounts/url.go index 21df668efd26..a5add1021687 100644 --- a/accounts/url.go +++ b/accounts/url.go @@ -76,12 +76,12 @@ func (u URL) MarshalJSON() ([]byte, error) { // UnmarshalJSON parses url. func (u *URL) UnmarshalJSON(input []byte) error { - var textUrl string - err := json.Unmarshal(input, &textUrl) + var textURL string + err := json.Unmarshal(input, &textURL) if err != nil { return err } - url, err := parseURL(textUrl) + url, err := parseURL(textURL) if err != nil { return err } diff --git a/accounts/url_test.go b/accounts/url_test.go new file mode 100644 index 000000000000..802772871947 --- /dev/null +++ b/accounts/url_test.go @@ -0,0 +1,96 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package accounts + +import ( + "testing" +) + +func TestURLParsing(t *testing.T) { + url, err := parseURL("https://ethereum.org") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if url.Scheme != "https" { + t.Errorf("expected: %v, got: %v", "https", url.Scheme) + } + if url.Path != "ethereum.org" { + t.Errorf("expected: %v, got: %v", "ethereum.org", url.Path) + } + + _, err = parseURL("ethereum.org") + if err == nil { + t.Error("expected err, got: nil") + } +} + +func TestURLString(t *testing.T) { + url := URL{Scheme: "https", Path: "ethereum.org"} + if url.String() != "https://ethereum.org" { + t.Errorf("expected: %v, got: %v", "https://ethereum.org", url.String()) + } + + url = URL{Scheme: "", Path: "ethereum.org"} + if url.String() != "ethereum.org" { + t.Errorf("expected: %v, got: %v", "ethereum.org", url.String()) + } +} + +func TestURLMarshalJSON(t *testing.T) { + url := URL{Scheme: "https", Path: "ethereum.org"} + json, err := url.MarshalJSON() + if err != nil { + t.Errorf("unexpcted error: %v", err) + } + if string(json) != "\"https://ethereum.org\"" { + t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json)) + } +} + +func TestURLUnmarshalJSON(t *testing.T) { + url := &URL{} + err := url.UnmarshalJSON([]byte("\"https://ethereum.org\"")) + if err != nil { + t.Errorf("unexpcted error: %v", err) + } + if url.Scheme != "https" { + t.Errorf("expected: %v, got: %v", "https", url.Scheme) + } + if url.Path != "ethereum.org" { + t.Errorf("expected: %v, got: %v", "https", url.Path) + } +} + +func TestURLComparison(t *testing.T) { + tests := []struct { + urlA URL + urlB URL + expect int + }{ + {URL{"https", "ethereum.org"}, URL{"https", "ethereum.org"}, 0}, + {URL{"http", "ethereum.org"}, URL{"https", "ethereum.org"}, -1}, + {URL{"https", "ethereum.org/a"}, URL{"https", "ethereum.org"}, 1}, + {URL{"https", "abc.org"}, URL{"https", "ethereum.org"}, -1}, + } + + for i, tt := range tests { + result := tt.urlA.Cmp(tt.urlB) + if result != tt.expect { + t.Errorf("test %d: cmp mismatch: expected: %d, got: %d", i, tt.expect, result) + } + } +} diff --git a/build/ci.go b/build/ci.go index 5939d91e9689..40252cbde1ff 100644 --- a/build/ci.go +++ b/build/ci.go @@ -26,7 +26,7 @@ Available commands are: install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables test [ -coverage ] [ packages... ] -- runs the tests lint -- runs certain pre-selected linters - archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artefacts + archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artifacts importkeys -- imports signing keys from env debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package nsis -- creates a Windows NSIS installer @@ -59,6 +59,8 @@ import ( "time" "github.com/ethereum/go-ethereum/internal/build" + "github.com/ethereum/go-ethereum/params" + sv "github.com/ethereum/go-ethereum/swarm/version" ) var ( @@ -77,52 +79,84 @@ var ( executablePath("geth"), executablePath("puppeth"), executablePath("rlpdump"), - executablePath("swarm"), executablePath("wnode"), } + // Files that end up in the swarm*.zip archive. + swarmArchiveFiles = []string{ + "COPYING", + executablePath("swarm"), + } + // A debian package is created for all executables listed here. 
debExecutables = []debExecutable{ { - Name: "abigen", + BinaryName: "abigen", Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.", }, { - Name: "bootnode", + BinaryName: "bootnode", Description: "Ethereum bootnode.", }, { - Name: "evm", + BinaryName: "evm", Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.", }, { - Name: "geth", + BinaryName: "geth", Description: "Ethereum CLI client.", }, { - Name: "puppeth", + BinaryName: "puppeth", Description: "Ethereum private network manager.", }, { - Name: "rlpdump", + BinaryName: "rlpdump", Description: "Developer utility tool that prints RLP structures.", }, { - Name: "swarm", - Description: "Ethereum Swarm daemon and tools", + BinaryName: "wnode", + Description: "Ethereum Whisper diagnostic tool", }, + } + + // A debian package is created for all executables listed here. + debSwarmExecutables = []debExecutable{ { - Name: "wnode", - Description: "Ethereum Whisper diagnostic tool", + BinaryName: "swarm", + PackageName: "ethereum-swarm", + Description: "Ethereum Swarm daemon and tools", }, } + debEthereum = debPackage{ + Name: "ethereum", + Version: params.Version, + Executables: debExecutables, + } + + debSwarm = debPackage{ + Name: "ethereum-swarm", + Version: sv.Version, + Executables: debSwarmExecutables, + } + + // Debian meta packages to build and push to Ubuntu PPA + debPackages = []debPackage{ + debSwarm, + debEthereum, + } + + // Packages to be cross-compiled by the xgo command + allCrossCompiledArchiveFiles = append(allToolsArchiveFiles, swarmArchiveFiles...) + // Distros for which packages are created. // Note: vivid is unsupported because there is no golang-1.6 package for it. // Note: wily is unsupported because it was officially deprecated on lanchpad. // Note: yakkety is unsupported because it was officially deprecated on lanchpad. // Note: zesty is unsupported because it was officially deprecated on lanchpad. - debDistros = []string{"trusty", "xenial", "artful", "bionic"} + // Note: artful is unsupported because it was officially deprecated on lanchpad. 
+ debDistros = []string{"trusty", "xenial", "bionic", "cosmic"} ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) @@ -350,7 +384,6 @@ func doLint(cmdline []string) { } // Release Packaging - func doArchive(cmdline []string) { var ( arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging") @@ -370,10 +403,14 @@ func doArchive(cmdline []string) { } var ( - env = build.Env() - base = archiveBasename(*arch, env) - geth = "geth-" + base + ext - alltools = "geth-alltools-" + base + ext + env = build.Env() + + basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + geth = "geth-" + basegeth + ext + alltools = "geth-alltools-" + basegeth + ext + + baseswarm = archiveBasename(*arch, sv.ArchiveVersion(env.Commit)) + swarm = "swarm-" + baseswarm + ext ) maybeSkipArchive(env) if err := build.WriteArchive(geth, gethArchiveFiles); err != nil { @@ -382,14 +419,17 @@ func doArchive(cmdline []string) { if err := build.WriteArchive(alltools, allToolsArchiveFiles); err != nil { log.Fatal(err) } - for _, archive := range []string{geth, alltools} { + if err := build.WriteArchive(swarm, swarmArchiveFiles); err != nil { + log.Fatal(err) + } + for _, archive := range []string{geth, alltools, swarm} { if err := archiveUpload(archive, *upload, *signer); err != nil { log.Fatal(err) } } } -func archiveBasename(arch string, env build.Environment) string { +func archiveBasename(arch string, archiveVersion string) string { platform := runtime.GOOS + "-" + arch if arch == "arm" { platform += os.Getenv("GOARM") @@ -400,18 +440,7 @@ func archiveBasename(arch string, env build.Environment) string { if arch == "ios" { platform = "ios-all" } - return platform + "-" + archiveVersion(env) -} - -func archiveVersion(env build.Environment) string { - version := build.VERSION() - if isUnstableBuild(env) { - version += "-unstable" - } - if env.Commit != "" { - version += "-" + env.Commit[:8] - } - return version + return platform + "-" + archiveVersion } func archiveUpload(archive string, blobstore string, signer string) error { @@ -461,7 +490,6 @@ func maybeSkipArchive(env build.Environment) { } // Debian Packaging - func doDebianSource(cmdline []string) { var ( signer = flag.String("signer", "", `Signing key name, also used as package author`) @@ -485,21 +513,23 @@ func doDebianSource(cmdline []string) { build.MustRun(gpg) } - // Create the packages. 
- for _, distro := range debDistros { - meta := newDebMetadata(distro, *signer, env, now) - pkgdir := stageDebianSource(*workdir, meta) - debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc") - debuild.Dir = pkgdir - build.MustRun(debuild) + // Create Debian packages and upload them + for _, pkg := range debPackages { + for _, distro := range debDistros { + meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables) + pkgdir := stageDebianSource(*workdir, meta) + debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc") + debuild.Dir = pkgdir + build.MustRun(debuild) - changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString()) - changes = filepath.Join(*workdir, changes) - if *signer != "" { - build.MustRunCommand("debsign", changes) - } - if *upload != "" { - build.MustRunCommand("dput", *upload, changes) + changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString()) + changes = filepath.Join(*workdir, changes) + if *signer != "" { + build.MustRunCommand("debsign", changes) + } + if *upload != "" { + build.MustRunCommand("dput", *upload, changes) + } } } } @@ -524,9 +554,17 @@ func isUnstableBuild(env build.Environment) bool { return true } +type debPackage struct { + Name string // the name of the Debian package to produce, e.g. "ethereum", or "ethereum-swarm" + Version string // the clean version of the debPackage, e.g. 1.8.12 or 0.3.0, without any metadata + Executables []debExecutable // executables to be included in the package +} + type debMetadata struct { Env build.Environment + PackageName string + // go-ethereum version being built. Note that this // is not the debian package version. The package version // is constructed by VersionString. @@ -538,21 +576,33 @@ type debMetadata struct { } type debExecutable struct { - Name, Description string + PackageName string + BinaryName string + Description string +} + +// Package returns the name of the package if present, or +// fallbacks to BinaryName +func (d debExecutable) Package() string { + if d.PackageName != "" { + return d.PackageName + } + return d.BinaryName } -func newDebMetadata(distro, author string, env build.Environment, t time.Time) debMetadata { +func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata { if author == "" { // No signing key, use default author. author = "Ethereum Builds " } return debMetadata{ + PackageName: name, Env: env, Author: author, Distro: distro, - Version: build.VERSION(), + Version: version, Time: t.Format(time.RFC1123Z), - Executables: debExecutables, + Executables: exes, } } @@ -560,9 +610,9 @@ func newDebMetadata(distro, author string, env build.Environment, t time.Time) d // on all executable packages. func (meta debMetadata) Name() string { if isUnstableBuild(meta.Env) { - return "ethereum-unstable" + return meta.PackageName + "-unstable" } - return "ethereum" + return meta.PackageName } // VersionString returns the debian version of the packages. @@ -589,9 +639,9 @@ func (meta debMetadata) ExeList() string { // ExeName returns the package name of an executable package. 
func (meta debMetadata) ExeName(exe debExecutable) string { if isUnstableBuild(meta.Env) { - return exe.Name + "-unstable" + return exe.Package() + "-unstable" } - return exe.Name + return exe.Package() } // ExeConflicts returns the content of the Conflicts field @@ -606,7 +656,7 @@ func (meta debMetadata) ExeConflicts(exe debExecutable) string { // be preferred and the conflicting files should be handled via // alternates. We might do this eventually but using a conflict is // easier now. - return "ethereum, " + exe.Name + return "ethereum, " + exe.Package() } return "" } @@ -623,24 +673,23 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) { // Put the debian build files in place. debian := filepath.Join(pkgdir, "debian") - build.Render("build/deb.rules", filepath.Join(debian, "rules"), 0755, meta) - build.Render("build/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta) - build.Render("build/deb.control", filepath.Join(debian, "control"), 0644, meta) - build.Render("build/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta) + build.Render("build/deb/"+meta.PackageName+"/deb.rules", filepath.Join(debian, "rules"), 0755, meta) + build.Render("build/deb/"+meta.PackageName+"/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta) + build.Render("build/deb/"+meta.PackageName+"/deb.control", filepath.Join(debian, "control"), 0644, meta) + build.Render("build/deb/"+meta.PackageName+"/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta) build.RenderString("8\n", filepath.Join(debian, "compat"), 0644, meta) build.RenderString("3.0 (native)\n", filepath.Join(debian, "source/format"), 0644, meta) for _, exe := range meta.Executables { install := filepath.Join(debian, meta.ExeName(exe)+".install") docs := filepath.Join(debian, meta.ExeName(exe)+".docs") - build.Render("build/deb.install", install, 0644, exe) - build.Render("build/deb.docs", docs, 0644, exe) + build.Render("build/deb/"+meta.PackageName+"/deb.install", install, 0644, exe) + build.Render("build/deb/"+meta.PackageName+"/deb.docs", docs, 0644, exe) } return pkgdir } // Windows installer - func doWindowsInstaller(cmdline []string) { // Parse the flags and make skip installer generation on PRs var ( @@ -690,11 +739,11 @@ func doWindowsInstaller(cmdline []string) { // Build the installer. This assumes that all the needed files have been previously // built (don't mix building and packaging to keep cross compilation complexity to a // minimum). 
- version := strings.Split(build.VERSION(), ".") + version := strings.Split(params.Version, ".") if env.Commit != "" { version[2] += "-" + env.Commit[:8] } - installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, env) + ".exe") + installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe") build.MustRunCommand("makensis.exe", "/DOUTPUTFILE="+installer, "/DMAJORVERSION="+version[0], @@ -746,7 +795,7 @@ func doAndroidArchive(cmdline []string) { maybeSkipArchive(env) // Sign and upload the archive to Azure - archive := "geth-" + archiveBasename("android", env) + ".aar" + archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar" os.Rename("geth.aar", archive) if err := archiveUpload(archive, *upload, *signer); err != nil { @@ -831,7 +880,7 @@ func newMavenMetadata(env build.Environment) mavenMetadata { } } // Render the version and package strings - version := build.VERSION() + version := params.Version if isUnstableBuild(env) { version += "-SNAPSHOT" } @@ -866,7 +915,7 @@ func doXCodeFramework(cmdline []string) { build.MustRun(bind) return } - archive := "geth-" + archiveBasename("ios", env) + archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit)) if err := os.Mkdir(archive, os.ModePerm); err != nil { log.Fatal(err) } @@ -922,7 +971,7 @@ func newPodMetadata(env build.Environment, archive string) podMetadata { } } } - version := build.VERSION() + version := params.Version if isUnstableBuild(env) { version += "-unstable." + env.Buildnum } @@ -952,7 +1001,7 @@ func doXgo(cmdline []string) { if *alltools { args = append(args, []string{"--dest", GOBIN}...) - for _, res := range allToolsArchiveFiles { + for _, res := range allCrossCompiledArchiveFiles { if strings.HasPrefix(res, GOBIN) { // Binary tool found, cross build it explicitly args = append(args, "./"+filepath.Join("cmd", filepath.Base(res))) @@ -1018,23 +1067,14 @@ func doPurge(cmdline []string) { } for i := 0; i < len(blobs); i++ { for j := i + 1; j < len(blobs); j++ { - iTime, err := time.Parse(time.RFC1123, blobs[i].Properties.LastModified) - if err != nil { - log.Fatal(err) - } - jTime, err := time.Parse(time.RFC1123, blobs[j].Properties.LastModified) - if err != nil { - log.Fatal(err) - } - if iTime.After(jTime) { + if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) { blobs[i], blobs[j] = blobs[j], blobs[i] } } } // Filter out all archives more recent that the given threshold for i, blob := range blobs { - timestamp, _ := time.Parse(time.RFC1123, blob.Properties.LastModified) - if time.Since(timestamp) < time.Duration(*limit)*24*time.Hour { + if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour { blobs = blobs[:i] break } diff --git a/build/clean_go_build_cache.sh b/build/clean_go_build_cache.sh new file mode 100755 index 000000000000..1666381d982d --- /dev/null +++ b/build/clean_go_build_cache.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +# Cleaning the Go cache only makes sense if we actually have Go installed... or +# if Go is actually callable. This does not hold true during deb packaging, so +# we need an explicit check to avoid build failures. +if ! 
command -v go > /dev/null; then + exit +fi + +version_gt() { + test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1" +} + +golang_version=$(go version |cut -d' ' -f3 |sed 's/go//') + +# Clean go build cache when go version is greater than or equal to 1.10 +if !(version_gt 1.10 $golang_version); then + go clean -cache +fi diff --git a/build/deb.install b/build/deb.install deleted file mode 100644 index 7dc76e1f5673..000000000000 --- a/build/deb.install +++ /dev/null @@ -1 +0,0 @@ -build/bin/{{.Name}} usr/bin diff --git a/build/deb.changelog b/build/deb/ethereum-swarm/deb.changelog similarity index 100% rename from build/deb.changelog rename to build/deb/ethereum-swarm/deb.changelog diff --git a/build/deb/ethereum-swarm/deb.control b/build/deb/ethereum-swarm/deb.control new file mode 100644 index 000000000000..8cd325bf55ac --- /dev/null +++ b/build/deb/ethereum-swarm/deb.control @@ -0,0 +1,19 @@ +Source: {{.Name}} +Section: science +Priority: extra +Maintainer: {{.Author}} +Build-Depends: debhelper (>= 8.0.0), golang-1.10 +Standards-Version: 3.9.5 +Homepage: https://ethereum.org +Vcs-Git: git://github.com/ethereum/go-ethereum.git +Vcs-Browser: https://github.com/ethereum/go-ethereum + +{{range .Executables}} +Package: {{$.ExeName .}} +Conflicts: {{$.ExeConflicts .}} +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends} +Built-Using: ${misc:Built-Using} +Description: {{.Description}} + {{.Description}} +{{end}} diff --git a/build/deb.copyright b/build/deb/ethereum-swarm/deb.copyright similarity index 93% rename from build/deb.copyright rename to build/deb/ethereum-swarm/deb.copyright index 513be45b193d..fe6e36ad9d76 100644 --- a/build/deb.copyright +++ b/build/deb/ethereum-swarm/deb.copyright @@ -1,4 +1,4 @@ -Copyright 2016 The go-ethereum Authors +Copyright 2018 The go-ethereum Authors go-ethereum is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/build/deb.docs b/build/deb/ethereum-swarm/deb.docs similarity index 100% rename from build/deb.docs rename to build/deb/ethereum-swarm/deb.docs diff --git a/build/deb/ethereum-swarm/deb.install b/build/deb/ethereum-swarm/deb.install new file mode 100644 index 000000000000..e7666ce5fb6b --- /dev/null +++ b/build/deb/ethereum-swarm/deb.install @@ -0,0 +1 @@ +build/bin/{{.BinaryName}} usr/bin diff --git a/build/deb.rules b/build/deb/ethereum-swarm/deb.rules similarity index 100% rename from build/deb.rules rename to build/deb/ethereum-swarm/deb.rules diff --git a/build/deb/ethereum/deb.changelog b/build/deb/ethereum/deb.changelog new file mode 100644 index 000000000000..83f804a83384 --- /dev/null +++ b/build/deb/ethereum/deb.changelog @@ -0,0 +1,5 @@ +{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low + + * git build of {{.Env.Commit}} + + -- {{.Author}} {{.Time}} diff --git a/build/deb.control b/build/deb/ethereum/deb.control similarity index 83% rename from build/deb.control rename to build/deb/ethereum/deb.control index 33c1a779f4ab..defb106fe367 100644 --- a/build/deb.control +++ b/build/deb/ethereum/deb.control @@ -11,8 +11,8 @@ Vcs-Browser: https://github.com/ethereum/go-ethereum Package: {{.Name}} Architecture: any Depends: ${misc:Depends}, {{.ExeList}} -Description: Meta-package to install geth and other tools - Meta-package to install geth and other tools +Description: Meta-package to install geth, swarm, and other tools + Meta-package to install geth, swarm and other tools {{range .Executables}} Package: 
{{$.ExeName .}} diff --git a/build/deb/ethereum/deb.copyright b/build/deb/ethereum/deb.copyright new file mode 100644 index 000000000000..fe6e36ad9d76 --- /dev/null +++ b/build/deb/ethereum/deb.copyright @@ -0,0 +1,14 @@ +Copyright 2018 The go-ethereum Authors + +go-ethereum is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +go-ethereum is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with go-ethereum. If not, see . diff --git a/build/deb/ethereum/deb.docs b/build/deb/ethereum/deb.docs new file mode 100644 index 000000000000..62deb04972da --- /dev/null +++ b/build/deb/ethereum/deb.docs @@ -0,0 +1 @@ +AUTHORS diff --git a/build/deb/ethereum/deb.install b/build/deb/ethereum/deb.install new file mode 100644 index 000000000000..e7666ce5fb6b --- /dev/null +++ b/build/deb/ethereum/deb.install @@ -0,0 +1 @@ +build/bin/{{.BinaryName}} usr/bin diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules new file mode 100644 index 000000000000..7f286569ea6e --- /dev/null +++ b/build/deb/ethereum/deb.rules @@ -0,0 +1,13 @@ +#!/usr/bin/make -f +# -*- makefile -*- + +# Uncomment this to turn on verbose mode. +#export DH_VERBOSE=1 + +override_dh_auto_build: + build/env.sh /usr/lib/go-1.10/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}} + +override_dh_auto_test: + +%: + dh $@ diff --git a/cmd/clef/main.go b/cmd/clef/main.go index 348bcb22f63e..85704754def5 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -415,7 +415,7 @@ func signer(c *cli.Context) error { // start http server httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name)) - listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts) + listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts) if err != nil { utils.Fatalf("Could not start RPC api: %v", err) } diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index 70fd6d1ab6f0..679906027566 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -213,7 +213,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u // Assemble the raw devp2p protocol stack stack, err := node.New(&node.Config{ Name: "geth", - Version: params.Version, + Version: params.VersionWithMeta, DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"), P2P: p2p.Config{ NAT: nat.Any(), diff --git a/cmd/geth/bugcmd.go b/cmd/geth/bugcmd.go index 7e9a8ccc70af..0adc69d1fab4 100644 --- a/cmd/geth/bugcmd.go +++ b/cmd/geth/bugcmd.go @@ -51,7 +51,7 @@ func reportBug(ctx *cli.Context) error { fmt.Fprintln(&buff, "#### System information") fmt.Fprintln(&buff) - fmt.Fprintln(&buff, "Version:", params.Version) + fmt.Fprintln(&buff, "Version:", params.VersionWithMeta) fmt.Fprintln(&buff, "Go Version:", runtime.Version()) fmt.Fprintln(&buff, "OS:", runtime.GOOS) printOSDetails(&buff) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index d3086921b95a..87548865be61 100644 --- 
a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -48,7 +48,6 @@ var ( ArgsUsage: "", Flags: []cli.Flag{ utils.DataDirFlag, - utils.LightModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -66,7 +65,7 @@ It expects the genesis file as argument.`, Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.LightModeFlag, + utils.SyncModeFlag, utils.GCModeFlag, utils.CacheDatabaseFlag, utils.CacheGCFlag, @@ -87,14 +86,15 @@ processing will proceed even if an individual RLP-file import failure occurs.`, Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.LightModeFlag, + utils.SyncModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` Requires a first argument of the file to write to. Optional second and third arguments control the first and last block to write. In this mode, the file will be appended -if already existing.`, +if already existing. If the file ends with .gz, the output will +be gzipped.`, } importPreimagesCommand = cli.Command{ Action: utils.MigrateFlags(importPreimages), @@ -104,7 +104,7 @@ if already existing.`, Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.LightModeFlag, + utils.SyncModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -118,7 +118,7 @@ if already existing.`, Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.LightModeFlag, + utils.SyncModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -148,7 +148,6 @@ The first argument must be the directory containing the blockchain to download f ArgsUsage: " ", Flags: []cli.Flag{ utils.DataDirFlag, - utils.LightModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -162,7 +161,7 @@ Remove blockchain and state databases`, Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.LightModeFlag, + utils.SyncModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index 258b9e6dd92f..34ba877020c0 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -31,7 +31,7 @@ import ( ) const ( - ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0" + ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 ethash:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0" httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0" ) @@ -50,7 +50,7 @@ func TestConsoleWelcome(t *testing.T) { geth.SetTemplateFunc("goos", func() string { return runtime.GOOS }) geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH }) geth.SetTemplateFunc("gover", runtime.Version) - geth.SetTemplateFunc("gethver", func() string { return params.Version }) + geth.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta }) geth.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) }) geth.SetTemplateFunc("apis", func() string { return ipcAPIs }) @@ -133,7 +133,7 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) { attach.SetTemplateFunc("goos", func() string { return runtime.GOOS }) attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH }) attach.SetTemplateFunc("gover", runtime.Version) - attach.SetTemplateFunc("gethver", func() string { return params.Version }) + attach.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta }) attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase }) attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) }) 
attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") }) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index f4a0ac5df647..4b86382bdbaa 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -72,6 +72,7 @@ var ( utils.EthashDatasetDirFlag, utils.EthashDatasetsInMemoryFlag, utils.EthashDatasetsOnDiskFlag, + utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, utils.TxPoolJournalFlag, utils.TxPoolRejournalFlag, @@ -82,8 +83,6 @@ var ( utils.TxPoolAccountQueueFlag, utils.TxPoolGlobalQueueFlag, utils.TxPoolLifetimeFlag, - utils.FastSyncFlag, - utils.LightModeFlag, utils.SyncModeFlag, utils.GCModeFlag, utils.LightServFlag, @@ -96,11 +95,19 @@ var ( utils.ListenPortFlag, utils.MaxPeersFlag, utils.MaxPendingPeersFlag, - utils.EtherbaseFlag, - utils.GasPriceFlag, - utils.MinerThreadsFlag, utils.MiningEnabledFlag, - utils.TargetGasLimitFlag, + utils.MinerThreadsFlag, + utils.MinerLegacyThreadsFlag, + utils.MinerNotifyFlag, + utils.MinerGasTargetFlag, + utils.MinerLegacyGasTargetFlag, + utils.MinerGasPriceFlag, + utils.MinerLegacyGasPriceFlag, + utils.MinerEtherbaseFlag, + utils.MinerLegacyEtherbaseFlag, + utils.MinerExtraDataFlag, + utils.MinerLegacyExtraDataFlag, + utils.MinerRecommitIntervalFlag, utils.NATFlag, utils.NoDiscoverFlag, utils.DiscoveryV5Flag, @@ -121,7 +128,6 @@ var ( utils.NoCompactionFlag, utils.GpoBlocksFlag, utils.GpoPercentileFlag, - utils.ExtraDataFlag, configFileFlag, } @@ -199,10 +205,15 @@ func init() { app.Before = func(ctx *cli.Context) error { runtime.GOMAXPROCS(runtime.NumCPU()) - if err := debug.Setup(ctx); err != nil { + + logdir := "" + if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) { + logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs") + } + if err := debug.Setup(ctx, logdir); err != nil { return err } - // Cap the cache allowance and tune the garbage colelctor + // Cap the cache allowance and tune the garbage collector var mem gosigar.Mem if err := mem.Get(); err == nil { allowance := int(mem.Total / 1024 / 1024 / 3) @@ -246,6 +257,9 @@ func main() { // It creates a default node based on the command line arguments and runs it in // blocking mode, waiting for it to be shut down. 
func geth(ctx *cli.Context) error { + if args := ctx.Args(); len(args) > 0 { + return fmt.Errorf("invalid command: %q", args[0]) + } node := makeFullNode(ctx) startNode(ctx, node) node.Wait() @@ -300,11 +314,11 @@ func startNode(ctx *cli.Context, stack *node.Node) { status, _ := event.Wallet.Status() log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status) + derivationPath := accounts.DefaultBaseDerivationPath if event.Wallet.URL().Scheme == "ledger" { - event.Wallet.SelfDerive(accounts.DefaultLedgerBaseDerivationPath, stateReader) - } else { - event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader) + derivationPath = accounts.DefaultLedgerBaseDerivationPath } + event.Wallet.SelfDerive(derivationPath, stateReader) case accounts.WalletDropped: log.Info("Old wallet dropped", "url", event.Wallet.URL()) @@ -315,7 +329,7 @@ func startNode(ctx *cli.Context, stack *node.Node) { // Start auxiliary services if enabled if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) { // Mining only makes sense if a full Ethereum node is running - if ctx.GlobalBool(utils.LightModeFlag.Name) || ctx.GlobalString(utils.SyncModeFlag.Name) == "light" { + if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" { utils.Fatalf("Light clients do not support mining") } var ethereum *eth.Ethereum @@ -323,7 +337,11 @@ func startNode(ctx *cli.Context, stack *node.Node) { utils.Fatalf("Ethereum service not running: %v", err) } // Use a reduced number of threads if requested - if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 { + threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name) + if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) { + threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name) + } + if threads > 0 { type threaded interface { SetThreads(threads int) } @@ -332,7 +350,11 @@ func startNode(ctx *cli.Context, stack *node.Node) { } } // Set the gas price to the limits from the CLI and start mining - ethereum.TxPool().SetGasPrice(utils.GlobalBig(ctx, utils.GasPriceFlag.Name)) + gasprice := utils.GlobalBig(ctx, utils.MinerLegacyGasPriceFlag.Name) + if ctx.IsSet(utils.MinerGasPriceFlag.Name) { + gasprice = utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) + } + ethereum.TxPool().SetGasPrice(gasprice) if err := ethereum.StartMining(true); err != nil { utils.Fatalf("Failed to start mining: %v", err) } diff --git a/cmd/geth/misccmd.go b/cmd/geth/misccmd.go index aa9b1ee568e7..f62e254786d5 100644 --- a/cmd/geth/misccmd.go +++ b/cmd/geth/misccmd.go @@ -108,7 +108,7 @@ func makedag(ctx *cli.Context) error { func version(ctx *cli.Context) error { fmt.Println(strings.Title(clientIdentifier)) - fmt.Println("Version:", params.Version) + fmt.Println("Version:", params.VersionWithMeta) if gitCommit != "" { fmt.Println("Git Commit:", gitCommit) } diff --git a/cmd/geth/monitorcmd.go b/cmd/geth/monitorcmd.go index cd19caa2765c..e4ba96a7a681 100644 --- a/cmd/geth/monitorcmd.go +++ b/cmd/geth/monitorcmd.go @@ -185,12 +185,12 @@ func resolveMetric(metrics map[string]interface{}, pattern string, path string) parts := strings.SplitN(pattern, "/", 2) if len(parts) > 1 { for _, variation := range strings.Split(parts[0], ",") { - if submetrics, ok := metrics[variation].(map[string]interface{}); !ok { + submetrics, ok := metrics[variation].(map[string]interface{}) + if !ok { utils.Fatalf("Failed to retrieve system metrics: %s", path+variation) return nil - } else { - results = append(results, resolveMetric(submetrics, parts[1], 
path+variation+"/")...) } + results = append(results, resolveMetric(submetrics, parts[1], path+variation+"/")...) } return results } diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 06db64664ee5..1e27d0ae8d50 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -83,7 +83,8 @@ var AppHelpFlagGroups = []flagGroup{ utils.LightKDFFlag, }, }, - {Name: "DEVELOPER CHAIN", + { + Name: "DEVELOPER CHAIN", Flags: []cli.Flag{ utils.DeveloperFlag, utils.DeveloperPeriodFlag, @@ -113,6 +114,7 @@ var AppHelpFlagGroups = []flagGroup{ { Name: "TRANSACTION POOL", Flags: []cli.Flag{ + utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, utils.TxPoolJournalFlag, utils.TxPoolRejournalFlag, @@ -184,10 +186,12 @@ var AppHelpFlagGroups = []flagGroup{ Flags: []cli.Flag{ utils.MiningEnabledFlag, utils.MinerThreadsFlag, - utils.EtherbaseFlag, - utils.TargetGasLimitFlag, - utils.GasPriceFlag, - utils.ExtraDataFlag, + utils.MinerNotifyFlag, + utils.MinerGasPriceFlag, + utils.MinerGasTargetFlag, + utils.MinerEtherbaseFlag, + utils.MinerExtraDataFlag, + utils.MinerRecommitIntervalFlag, }, }, { @@ -229,8 +233,11 @@ var AppHelpFlagGroups = []flagGroup{ { Name: "DEPRECATED", Flags: []cli.Flag{ - utils.FastSyncFlag, - utils.LightModeFlag, + utils.MinerLegacyThreadsFlag, + utils.MinerLegacyGasTargetFlag, + utils.MinerLegacyGasPriceFlag, + utils.MinerLegacyEtherbaseFlag, + utils.MinerLegacyExtraDataFlag, }, }, { diff --git a/cmd/puppeth/module_dashboard.go b/cmd/puppeth/module_dashboard.go index a517623381b5..d22bd8110398 100644 --- a/cmd/puppeth/module_dashboard.go +++ b/cmd/puppeth/module_dashboard.go @@ -678,9 +678,9 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da // Build and deploy the dashboard service if nocache { - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network)) } - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network)) } // dashboardInfos is returned from a dashboard status check to allow reporting diff --git a/cmd/puppeth/module_ethstats.go b/cmd/puppeth/module_ethstats.go index 20b7afe236bd..a7d99a297992 100644 --- a/cmd/puppeth/module_ethstats.go +++ b/cmd/puppeth/module_ethstats.go @@ -100,9 +100,9 @@ func deployEthstats(client *sshClient, network string, port int, secret string, // Build and deploy the ethstats service if nocache { - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network)) } - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network)) } // ethstatsInfos is returned from an ethstats status check to allow reporting @@ -122,7 +122,7 @@ func (info *ethstatsInfos) 
Report() map[string]string { "Website address": info.host, "Website listener port": strconv.Itoa(info.port), "Login secret": info.secret, - "Banned addresses": fmt.Sprintf("%v", info.banned), + "Banned addresses": strings.Join(info.banned, "\n"), } } diff --git a/cmd/puppeth/module_explorer.go b/cmd/puppeth/module_explorer.go index bb43e5fe4e9b..e916deaf6f99 100644 --- a/cmd/puppeth/module_explorer.go +++ b/cmd/puppeth/module_explorer.go @@ -38,7 +38,7 @@ ADD chain.json /chain.json RUN \ echo '(cd ../eth-net-intelligence-api && pm2 start /ethstats.json)' > explorer.sh && \ echo '(cd ../etherchain-light && npm start &)' >> explorer.sh && \ - echo '/parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh + echo 'exec /parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh ENTRYPOINT ["/bin/sh", "explorer.sh"] ` @@ -140,9 +140,9 @@ func deployExplorer(client *sshClient, network string, chainspec []byte, config // Build and deploy the boot or seal node service if nocache { - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network)) } - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network)) } // explorerInfos is returned from a block explorer status check to allow reporting diff --git a/cmd/puppeth/module_faucet.go b/cmd/puppeth/module_faucet.go index 8365bf47d039..06c9fc0f583c 100644 --- a/cmd/puppeth/module_faucet.go +++ b/cmd/puppeth/module_faucet.go @@ -133,9 +133,9 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config // Build and deploy the faucet service if nocache { - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network)) } - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network)) } // faucetInfos is returned from a faucet status check to allow reporting various diff --git a/cmd/puppeth/module_nginx.go b/cmd/puppeth/module_nginx.go index 35c0efc8ad81..7f87661d3bc9 100644 --- a/cmd/puppeth/module_nginx.go +++ b/cmd/puppeth/module_nginx.go @@ -81,9 +81,9 @@ func deployNginx(client *sshClient, network string, port int, nocache bool) ([]b // Build and deploy the reverse-proxy service if nocache { - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network)) } - return nil, 
client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network)) } // nginxInfos is returned from an nginx reverse-proxy status check to allow diff --git a/cmd/puppeth/module_node.go b/cmd/puppeth/module_node.go index 1e1767c04b30..8ad41555e118 100644 --- a/cmd/puppeth/module_node.go +++ b/cmd/puppeth/module_node.go @@ -42,7 +42,7 @@ ADD genesis.json /genesis.json RUN \ echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}} echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}} - echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh + echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gasprice {{.GasPrice}}' >> geth.sh ENTRYPOINT ["/bin/sh", "geth.sh"] ` @@ -139,9 +139,9 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n // Build and deploy the boot or seal node service if nocache { - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network)) } - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network)) } // nodeInfos is returned from a boot or seal node status check to allow reporting @@ -221,7 +221,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error) // Container available, retrieve its node ID and its genesis json var out []byte - if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id attach", network, kind)); err != nil { + if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id --cache=16 attach", network, kind)); err != nil { return nil, ErrServiceUnreachable } id := bytes.Trim(bytes.TrimSpace(out), "\"") diff --git a/cmd/puppeth/module_wallet.go b/cmd/puppeth/module_wallet.go index 5e5032bedf00..90812c4a0b8b 100644 --- a/cmd/puppeth/module_wallet.go +++ b/cmd/puppeth/module_wallet.go @@ -37,7 +37,7 @@ ADD genesis.json /genesis.json RUN \ echo 'node server.js &' > wallet.sh && \ echo 'geth --cache 512 init /genesis.json' >> wallet.sh && \ - echo $'geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh + echo $'exec geth --networkid 
{{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh RUN \ sed -i 's/PuppethNetworkID/{{.NetworkID}}/g' dist/js/etherwallet-master.js && \ @@ -120,9 +120,9 @@ func deployWallet(client *sshClient, network string, bootnodes []string, config // Build and deploy the boot or seal node service if nocache { - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network)) } - return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network)) + return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network)) } // walletInfos is returned from a web wallet status check to allow reporting diff --git a/cmd/puppeth/ssh.go b/cmd/puppeth/ssh.go index 158261ce0516..c507596065ba 100644 --- a/cmd/puppeth/ssh.go +++ b/cmd/puppeth/ssh.go @@ -45,33 +45,44 @@ type sshClient struct { // dial establishes an SSH connection to a remote node using the current user and // the user's configured private RSA key. If that fails, password authentication -// is fallen back to. The caller may override the login user via user@server:port. +// is fallen back to. server can be a string like user:identity@server:port. func dial(server string, pubkey []byte) (*sshClient, error) { - // Figure out a label for the server and a logger - label := server - if strings.Contains(label, ":") { - label = label[:strings.Index(label, ":")] - } - login := "" + // Figure out username, identity, hostname and port + hostname := "" + hostport := server + username := "" + identity := "id_rsa" // default + if strings.Contains(server, "@") { - login = label[:strings.Index(label, "@")] - label = label[strings.Index(label, "@")+1:] - server = server[strings.Index(server, "@")+1:] + prefix := server[:strings.Index(server, "@")] + if strings.Contains(prefix, ":") { + username = prefix[:strings.Index(prefix, ":")] + identity = prefix[strings.Index(prefix, ":")+1:] + } else { + username = prefix + } + hostport = server[strings.Index(server, "@")+1:] } - logger := log.New("server", label) + if strings.Contains(hostport, ":") { + hostname = hostport[:strings.Index(hostport, ":")] + } else { + hostname = hostport + hostport += ":22" + } + logger := log.New("server", server) logger.Debug("Attempting to establish SSH connection") user, err := user.Current() if err != nil { return nil, err } - if login == "" { - login = user.Username + if username == "" { + username = user.Username } // Configure the supported authentication methods (private key and password) var auths []ssh.AuthMethod - path := filepath.Join(user.HomeDir, ".ssh", "id_rsa") + path := filepath.Join(user.HomeDir, ".ssh", identity) if buf, err := ioutil.ReadFile(path); err != nil { log.Warn("No SSH key, falling back to passwords", "path", path, "err", err) } else { @@ -94,14 +105,14 @@ func dial(server string, pubkey []byte) (*sshClient, error) { } } auths = append(auths, ssh.PasswordCallback(func() (string, error) { - fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server) + fmt.Printf("What's the login password for %s at %s? 
(won't be echoed)\n> ", username, server) blob, err := terminal.ReadPassword(int(os.Stdin.Fd())) fmt.Println() return string(blob), err })) // Resolve the IP address of the remote server - addr, err := net.LookupHost(label) + addr, err := net.LookupHost(hostname) if err != nil { return nil, err } @@ -109,10 +120,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) { return nil, errors.New("no IPs associated with domain") } // Try to dial in to the remote server - logger.Trace("Dialing remote SSH server", "user", login) - if !strings.Contains(server, ":") { - server += ":22" - } + logger.Trace("Dialing remote SSH server", "user", username) keycheck := func(hostname string, remote net.Addr, key ssh.PublicKey) error { // If no public key is known for SSH, ask the user to confirm if pubkey == nil { @@ -139,13 +147,13 @@ func dial(server string, pubkey []byte) (*sshClient, error) { // We have a mismatch, forbid connecting return errors.New("ssh key mismatch, readd the machine to update") } - client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: login, Auth: auths, HostKeyCallback: keycheck}) + client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck}) if err != nil { return nil, err } // Connection established, return our utility wrapper c := &sshClient{ - server: label, + server: hostname, address: addr[0], pubkey: pubkey, client: client, diff --git a/cmd/puppeth/wizard_netstats.go b/cmd/puppeth/wizard_netstats.go index a307c5ee3a89..99ca11bb1776 100644 --- a/cmd/puppeth/wizard_netstats.go +++ b/cmd/puppeth/wizard_netstats.go @@ -82,7 +82,6 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s logger.Info("Starting remote server health-check") stat := &serverStat{ - address: client.address, services: make(map[string]map[string]string), } if client == nil { @@ -94,6 +93,8 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s } client = conn } + stat.address = client.address + // Client connected one way or another, run health-checks logger.Debug("Checking for nginx availability") if infos, err := checkNginx(client, w.network); err != nil { @@ -203,7 +204,7 @@ func (stats serverStats) render() { table.SetHeader([]string{"Server", "Address", "Service", "Config", "Value"}) table.SetAlignment(tablewriter.ALIGN_LEFT) - table.SetColWidth(100) + table.SetColWidth(40) // Find the longest lines for all columns for the hacked separator separator := make([]string, 5) @@ -214,6 +215,9 @@ func (stats serverStats) render() { if len(stat.address) > len(separator[1]) { separator[1] = strings.Repeat("-", len(stat.address)) } + if len(stat.failure) > len(separator[1]) { + separator[1] = strings.Repeat("-", len(stat.failure)) + } for service, configs := range stat.services { if len(service) > len(separator[2]) { separator[2] = strings.Repeat("-", len(service)) @@ -222,8 +226,10 @@ func (stats serverStats) render() { if len(config) > len(separator[3]) { separator[3] = strings.Repeat("-", len(config)) } - if len(value) > len(separator[4]) { - separator[4] = strings.Repeat("-", len(value)) + for _, val := range strings.Split(value, "\n") { + if len(val) > len(separator[4]) { + separator[4] = strings.Repeat("-", len(val)) + } } } } @@ -248,7 +254,11 @@ func (stats serverStats) render() { sort.Strings(services) if len(services) == 0 { - table.Append([]string{server, stats[server].address, "", "", ""}) + if stats[server].failure != "" { + table.Append([]string{server, 
stats[server].failure, "", "", ""}) + } else { + table.Append([]string{server, stats[server].address, "", "", ""}) + } } for j, service := range services { // Add an empty line between all services @@ -263,13 +273,17 @@ func (stats serverStats) render() { sort.Strings(configs) for k, config := range configs { - switch { - case j == 0 && k == 0: - table.Append([]string{server, stats[server].address, service, config, stats[server].services[service][config]}) - case k == 0: - table.Append([]string{"", "", service, config, stats[server].services[service][config]}) - default: - table.Append([]string{"", "", "", config, stats[server].services[service][config]}) + for l, value := range strings.Split(stats[server].services[service][config], "\n") { + switch { + case j == 0 && k == 0 && l == 0: + table.Append([]string{server, stats[server].address, service, config, value}) + case k == 0 && l == 0: + table.Append([]string{"", "", service, config, value}) + case l == 0: + table.Append([]string{"", "", "", config, value}) + default: + table.Append([]string{"", "", "", "", value}) + } } } } diff --git a/cmd/puppeth/wizard_network.go b/cmd/puppeth/wizard_network.go index d780c550b164..c0ddcc2a3c91 100644 --- a/cmd/puppeth/wizard_network.go +++ b/cmd/puppeth/wizard_network.go @@ -62,14 +62,14 @@ func (w *wizard) manageServers() { } } -// makeServer reads a single line from stdin and interprets it as a hostname to -// connect to. It tries to establish a new SSH session and also executing some -// baseline validations. +// makeServer reads a single line from stdin and interprets it as +// username:identity@hostname to connect to. It tries to establish a +// new SSH session and also executing some baseline validations. // // If connection succeeds, the server is added to the wizards configs! func (w *wizard) makeServer() string { fmt.Println() - fmt.Println("Please enter remote server's address:") + fmt.Println("What is the remote server's address ([username[:identity]@]hostname[:port])?") // Read and dial the server to ensure docker is present input := w.readString() diff --git a/cmd/swarm/access.go b/cmd/swarm/access.go new file mode 100644 index 000000000000..12cfbfc1a46b --- /dev/null +++ b/cmd/swarm/access.go @@ -0,0 +1,219 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+package main + +import ( + "crypto/rand" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/swarm/api" + "github.com/ethereum/go-ethereum/swarm/api/client" + "gopkg.in/urfave/cli.v1" +) + +var salt = make([]byte, 32) + +func init() { + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + panic("reading from crypto/rand failed: " + err.Error()) + } +} + +func accessNewPass(ctx *cli.Context) { + args := ctx.Args() + if len(args) != 1 { + utils.Fatalf("Expected 1 argument - the ref") + } + + var ( + ae *api.AccessEntry + accessKey []byte + err error + ref = args[0] + password = getPassPhrase("", 0, makePasswordList(ctx)) + dryRun = ctx.Bool(SwarmDryRunFlag.Name) + ) + accessKey, ae, err = api.DoPasswordNew(ctx, password, salt) + if err != nil { + utils.Fatalf("error getting session key: %v", err) + } + m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae) + if dryRun { + err = printManifests(m, nil) + if err != nil { + utils.Fatalf("had an error printing the manifests: %v", err) + } + } else { + // not a dry run - upload the manifests to swarm + err = uploadManifests(ctx, m, nil) + if err != nil { + utils.Fatalf("had an error uploading the manifests: %v", err) + } + } +} + +func accessNewPK(ctx *cli.Context) { + args := ctx.Args() + if len(args) != 1 { + utils.Fatalf("Expected 1 argument - the ref") + } + + var ( + ae *api.AccessEntry + sessionKey []byte + err error + ref = args[0] + privateKey = getPrivKey(ctx) + granteePublicKey = ctx.String(SwarmAccessGrantKeyFlag.Name) + dryRun = ctx.Bool(SwarmDryRunFlag.Name) + ) + sessionKey, ae, err = api.DoPKNew(ctx, privateKey, granteePublicKey, salt) + if err != nil { + utils.Fatalf("error getting session key: %v", err) + } + m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae) + if dryRun { + err = printManifests(m, nil) + if err != nil { + utils.Fatalf("had an error printing the manifests: %v", err) + } + } else { + err = uploadManifests(ctx, m, nil) + if err != nil { + utils.Fatalf("had an error uploading the manifests: %v", err) + } + } +} + +func accessNewACT(ctx *cli.Context) { + args := ctx.Args() + if len(args) != 1 { + utils.Fatalf("Expected 1 argument - the ref") + } + + var ( + ae *api.AccessEntry + actManifest *api.Manifest + accessKey []byte + err error + ref = args[0] + grantees = []string{} + actFilename = ctx.String(SwarmAccessGrantKeysFlag.Name) + privateKey = getPrivKey(ctx) + dryRun = ctx.Bool(SwarmDryRunFlag.Name) + ) + + bytes, err := ioutil.ReadFile(actFilename) + if err != nil { + utils.Fatalf("had an error reading the grantee public key list") + } + grantees = strings.Split(string(bytes), "\n") + accessKey, ae, actManifest, err = api.DoACTNew(ctx, privateKey, salt, grantees) + if err != nil { + utils.Fatalf("error generating ACT manifest: %v", err) + } + + if err != nil { + utils.Fatalf("error getting session key: %v", err) + } + m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae) + if err != nil { + utils.Fatalf("error generating root access manifest: %v", err) + } + + if dryRun { + err = printManifests(m, actManifest) + if err != nil { + utils.Fatalf("had an error printing the manifests: %v", err) + } + } else { + err = uploadManifests(ctx, m, actManifest) + if err != nil { + utils.Fatalf("had an error uploading the manifests: %v", err) + } + } +} + +func printManifests(rootAccessManifest, actManifest *api.Manifest) error { + js, err :=
json.Marshal(rootAccessManifest) + if err != nil { + return err + } + fmt.Println(string(js)) + + if actManifest != nil { + js, err := json.Marshal(actManifest) + if err != nil { + return err + } + fmt.Println(string(js)) + } + return nil +} + +func uploadManifests(ctx *cli.Context, rootAccessManifest, actManifest *api.Manifest) error { + bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + client := client.NewClient(bzzapi) + + var ( + key string + err error + ) + if actManifest != nil { + key, err = client.UploadManifest(actManifest, false) + if err != nil { + return err + } + + rootAccessManifest.Entries[0].Access.Act = key + } + key, err = client.UploadManifest(rootAccessManifest, false) + if err != nil { + return err + } + fmt.Println(key) + return nil +} + +// makePasswordList reads password lines from the file specified by the global --password flag +// and also by the same subcommand --password flag. +// This function ia a fork of utils.MakePasswordList to lookup cli context for subcommand. +// Function ctx.SetGlobal is not setting the global flag value that can be accessed +// by ctx.GlobalString using the current version of cli package. +func makePasswordList(ctx *cli.Context) []string { + path := ctx.GlobalString(utils.PasswordFileFlag.Name) + if path == "" { + path = ctx.String(utils.PasswordFileFlag.Name) + if path == "" { + return nil + } + } + text, err := ioutil.ReadFile(path) + if err != nil { + utils.Fatalf("Failed to read password file: %v", err) + } + lines := strings.Split(string(text), "\n") + // Sanitise DOS line endings. + for i := range lines { + lines[i] = strings.TrimRight(lines[i], "\r") + } + return lines +} diff --git a/cmd/swarm/access_test.go b/cmd/swarm/access_test.go new file mode 100644 index 000000000000..163eb2b4d6f2 --- /dev/null +++ b/cmd/swarm/access_test.go @@ -0,0 +1,581 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . +package main + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "encoding/json" + "io" + "io/ioutil" + gorand "math/rand" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/sha3" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/api" + swarm "github.com/ethereum/go-ethereum/swarm/api/client" +) + +// TestAccessPassword tests for the correct creation of an ACT manifest protected by a password. +// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry +// The parties participating - node (publisher), uploads to second node then disappears. Content which was uploaded +// is then fetched through 2nd node. 
since the tested code is not key-aware - we can just +// fetch from the 2nd node using HTTP BasicAuth +func TestAccessPassword(t *testing.T) { + cluster := newTestCluster(t, 1) + defer cluster.Shutdown() + proxyNode := cluster.Nodes[0] + + // create a tmp file + tmp, err := ioutil.TempDir("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + // write data to file + data := "notsorandomdata" + dataFilename := filepath.Join(tmp, "data.txt") + + err = ioutil.WriteFile(dataFilename, []byte(data), 0666) + if err != nil { + t.Fatal(err) + } + + hashRegexp := `[a-f\d]{128}` + + // upload the file with 'swarm up' and expect a hash + up := runSwarm(t, + "--bzzapi", + proxyNode.URL, //it doesn't matter through which node we upload content + "up", + "--encrypt", + dataFilename) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + + if len(matches) < 1 { + t.Fatal("no matches found") + } + + ref := matches[0] + + password := "smth" + passwordFilename := filepath.Join(tmp, "password.txt") + + err = ioutil.WriteFile(passwordFilename, []byte(password), 0666) + if err != nil { + t.Fatal(err) + } + + up = runSwarm(t, + "access", + "new", + "pass", + "--dry-run", + "--password", + passwordFilename, + ref, + ) + + _, matches = up.ExpectRegexp(".+") + up.ExpectExit() + + if len(matches) == 0 { + t.Fatalf("stdout not matched") + } + + var m api.Manifest + + err = json.Unmarshal([]byte(matches[0]), &m) + if err != nil { + t.Fatalf("unmarshal manifest: %v", err) + } + + if len(m.Entries) != 1 { + t.Fatalf("expected one manifest entry, got %v", len(m.Entries)) + } + + e := m.Entries[0] + + ct := "application/bzz-manifest+json" + if e.ContentType != ct { + t.Errorf("expected %q content type, got %q", ct, e.ContentType) + } + + if e.Access == nil { + t.Fatal("manifest access is nil") + } + + a := e.Access + + if a.Type != "pass" { + t.Errorf(`got access type %q, expected "pass"`, a.Type) + } + if len(a.Salt) < 32 { + t.Errorf(`got salt with length %v, expected not less the 32 bytes`, len(a.Salt)) + } + if a.KdfParams == nil { + t.Fatal("manifest access kdf params is nil") + } + + client := swarm.NewClient(cluster.Nodes[0].URL) + + hash, err := client.UploadManifest(&m, false) + if err != nil { + t.Fatal(err) + } + + httpClient := &http.Client{} + + url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash + response, err := httpClient.Get(url) + if err != nil { + t.Fatal(err) + } + if response.StatusCode != http.StatusUnauthorized { + t.Fatal("should be a 401") + } + authHeader := response.Header.Get("WWW-Authenticate") + if authHeader == "" { + t.Fatal("should be something here") + } + + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + t.Fatal(err) + } + req.SetBasicAuth("", password) + + response, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + t.Errorf("expected status %v, got %v", http.StatusOK, response.StatusCode) + } + d, err := ioutil.ReadAll(response.Body) + if err != nil { + t.Fatal(err) + } + if string(d) != data { + t.Errorf("expected decrypted data %q, got %q", data, string(d)) + } + + wrongPasswordFilename := filepath.Join(tmp, "password-wrong.txt") + + err = ioutil.WriteFile(wrongPasswordFilename, []byte("just wr0ng"), 0666) + if err != nil { + t.Fatal(err) + } + + //download file with 'swarm down' with wrong password + up = runSwarm(t, + "--bzzapi", + proxyNode.URL, + "down", + "bzz:/"+hash, + tmp, + "--password", + wrongPasswordFilename) + + 
_, matches = up.ExpectRegexp("unauthorized") + if len(matches) != 1 && matches[0] != "unauthorized" { + t.Fatal(`"unauthorized" not found in output"`) + } + up.ExpectExit() +} + +// TestAccessPK tests for the correct creation of an ACT manifest between two parties (publisher and grantee). +// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry +// The parties participating - node (publisher), uploads to second node (which is also the grantee) then disappears. +// Content which was uploaded is then fetched through the grantee's http proxy. Since the tested code is private-key aware, +// the test will fail if the proxy's given private key is not granted on the ACT. +func TestAccessPK(t *testing.T) { + // Setup Swarm and upload a test file to it + cluster := newTestCluster(t, 1) + defer cluster.Shutdown() + + // create a tmp file + tmp, err := ioutil.TempFile("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer tmp.Close() + defer os.Remove(tmp.Name()) + + // write data to file + data := "notsorandomdata" + _, err = io.WriteString(tmp, data) + if err != nil { + t.Fatal(err) + } + + hashRegexp := `[a-f\d]{128}` + + // upload the file with 'swarm up' and expect a hash + up := runSwarm(t, + "--bzzapi", + cluster.Nodes[0].URL, + "up", + "--encrypt", + tmp.Name()) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + + if len(matches) < 1 { + t.Fatal("no matches found") + } + + ref := matches[0] + + pk := cluster.Nodes[0].PrivateKey + granteePubKey := crypto.CompressPubkey(&pk.PublicKey) + + publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp") + if err != nil { + t.Fatal(err) + } + + passFile, err := ioutil.TempFile("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer passFile.Close() + defer os.Remove(passFile.Name()) + _, err = io.WriteString(passFile, testPassphrase) + if err != nil { + t.Fatal(err) + } + _, publisherAccount := getTestAccount(t, publisherDir) + up = runSwarm(t, + "--bzzaccount", + publisherAccount.Address.String(), + "--password", + passFile.Name(), + "--datadir", + publisherDir, + "--bzzapi", + cluster.Nodes[0].URL, + "access", + "new", + "pk", + "--dry-run", + "--grant-key", + hex.EncodeToString(granteePubKey), + ref, + ) + + _, matches = up.ExpectRegexp(".+") + up.ExpectExit() + + if len(matches) == 0 { + t.Fatalf("stdout not matched") + } + + var m api.Manifest + + err = json.Unmarshal([]byte(matches[0]), &m) + if err != nil { + t.Fatalf("unmarshal manifest: %v", err) + } + + if len(m.Entries) != 1 { + t.Fatalf("expected one manifest entry, got %v", len(m.Entries)) + } + + e := m.Entries[0] + + ct := "application/bzz-manifest+json" + if e.ContentType != ct { + t.Errorf("expected %q content type, got %q", ct, e.ContentType) + } + + if e.Access == nil { + t.Fatal("manifest access is nil") + } + + a := e.Access + + if a.Type != "pk" { + t.Errorf(`got access type %q, expected "pk"`, a.Type) + } + if len(a.Salt) < 32 { + t.Errorf(`got salt with length %v, expected not less the 32 bytes`, len(a.Salt)) + } + if a.KdfParams != nil { + t.Fatal("manifest access kdf params should be nil") + } + + client := swarm.NewClient(cluster.Nodes[0].URL) + + hash, err := client.UploadManifest(&m, false) + if err != nil { + t.Fatal(err) + } + + httpClient := &http.Client{} + + url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash + response, err := httpClient.Get(url) + if err != nil { + t.Fatal(err) + } + if response.StatusCode != http.StatusOK { + t.Fatal("should be a 200") + } + d, err := 
ioutil.ReadAll(response.Body) + if err != nil { + t.Fatal(err) + } + if string(d) != data { + t.Errorf("expected decrypted data %q, got %q", data, string(d)) + } +} + +// TestAccessACT tests the e2e creation, uploading and downloading of an ACT type access control +// the test fires up a 3 node cluster, then randomly picks 2 nodes which will be acting as grantees to the data +// set. the third node should fail decoding the reference as it will not be granted access. the publisher uploads through +// one of the nodes then disappears. +func TestAccessACT(t *testing.T) { + // Setup Swarm and upload a test file to it + cluster := newTestCluster(t, 3) + defer cluster.Shutdown() + + var uploadThroughNode = cluster.Nodes[0] + client := swarm.NewClient(uploadThroughNode.URL) + + r1 := gorand.New(gorand.NewSource(time.Now().UnixNano())) + nodeToSkip := r1.Intn(3) // a number between 0 and 2 (node indices in `cluster`) + // create a tmp file + tmp, err := ioutil.TempFile("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer tmp.Close() + defer os.Remove(tmp.Name()) + + // write data to file + data := "notsorandomdata" + _, err = io.WriteString(tmp, data) + if err != nil { + t.Fatal(err) + } + + hashRegexp := `[a-f\d]{128}` + + // upload the file with 'swarm up' and expect a hash + up := runSwarm(t, + "--bzzapi", + cluster.Nodes[0].URL, + "up", + "--encrypt", + tmp.Name()) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + + if len(matches) < 1 { + t.Fatal("no matches found") + } + + ref := matches[0] + grantees := []string{} + for i, v := range cluster.Nodes { + if i == nodeToSkip { + continue + } + pk := v.PrivateKey + granteePubKey := crypto.CompressPubkey(&pk.PublicKey) + grantees = append(grantees, hex.EncodeToString(granteePubKey)) + } + + granteesPubkeyListFile, err := ioutil.TempFile("", "grantees-pubkey-list.csv") + if err != nil { + t.Fatal(err) + } + + _, err = granteesPubkeyListFile.WriteString(strings.Join(grantees, "\n")) + if err != nil { + t.Fatal(err) + } + + defer granteesPubkeyListFile.Close() + defer os.Remove(granteesPubkeyListFile.Name()) + + publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp") + if err != nil { + t.Fatal(err) + } + + passFile, err := ioutil.TempFile("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer passFile.Close() + defer os.Remove(passFile.Name()) + _, err = io.WriteString(passFile, testPassphrase) + if err != nil { + t.Fatal(err) + } + + _, publisherAccount := getTestAccount(t, publisherDir) + up = runSwarm(t, + "--bzzaccount", + publisherAccount.Address.String(), + "--password", + passFile.Name(), + "--datadir", + publisherDir, + "--bzzapi", + cluster.Nodes[0].URL, + "access", + "new", + "act", + "--grant-keys", + granteesPubkeyListFile.Name(), + ref, + ) + + _, matches = up.ExpectRegexp(`[a-f\d]{64}`) + up.ExpectExit() + + if len(matches) == 0 { + t.Fatalf("stdout not matched") + } + hash := matches[0] + m, _, err := client.DownloadManifest(hash) + if err != nil { + t.Fatalf("unmarshal manifest: %v", err) + } + + if len(m.Entries) != 1 { + t.Fatalf("expected one manifest entry, got %v", len(m.Entries)) + } + + e := m.Entries[0] + + ct := "application/bzz-manifest+json" + if e.ContentType != ct { + t.Errorf("expected %q content type, got %q", ct, e.ContentType) + } + + if e.Access == nil { + t.Fatal("manifest access is nil") + } + + a := e.Access + + if a.Type != "act" { + t.Fatalf(`got access type %q, expected "act"`, a.Type) + } + if len(a.Salt) < 32 { + t.Fatalf(`got salt with length %v, expected not less 
the 32 bytes`, len(a.Salt)) + } + if a.KdfParams != nil { + t.Fatal("manifest access kdf params should be nil") + } + + httpClient := &http.Client{} + + // all nodes except the skipped node should be able to decrypt the content + for i, node := range cluster.Nodes { + log.Debug("trying to fetch from node", "node index", i) + + url := node.URL + "/" + "bzz:/" + hash + response, err := httpClient.Get(url) + if err != nil { + t.Fatal(err) + } + log.Debug("got response from node", "response code", response.StatusCode) + + if i == nodeToSkip { + log.Debug("reached node to skip", "status code", response.StatusCode) + + if response.StatusCode != http.StatusUnauthorized { + t.Fatalf("should be a 401") + } + + continue + } + + if response.StatusCode != http.StatusOK { + t.Fatal("should be a 200") + } + d, err := ioutil.ReadAll(response.Body) + if err != nil { + t.Fatal(err) + } + if string(d) != data { + t.Errorf("expected decrypted data %q, got %q", data, string(d)) + } + } +} + +// TestKeypairSanity is a sanity test for the crypto scheme for ACT. it asserts the correct shared secret according to +// the specs at https://github.com/ethersphere/swarm-docs/blob/eb857afda906c6e7bb90d37f3f334ccce5eef230/act.md +func TestKeypairSanity(t *testing.T) { + salt := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + t.Fatalf("reading from crypto/rand failed: %v", err.Error()) + } + sharedSecret := "a85586744a1ddd56a7ed9f33fa24f40dd745b3a941be296a0d60e329dbdb896d" + + for i, v := range []struct { + publisherPriv string + granteePub string + }{ + { + publisherPriv: "ec5541555f3bc6376788425e9d1a62f55a82901683fd7062c5eddcc373a73459", + granteePub: "0226f213613e843a413ad35b40f193910d26eb35f00154afcde9ded57479a6224a", + }, + { + publisherPriv: "70c7a73011aa56584a0009ab874794ee7e5652fd0c6911cd02f8b6267dd82d2d", + granteePub: "02e6f8d5e28faaa899744972bb847b6eb805a160494690c9ee7197ae9f619181db", + }, + } { + b, _ := hex.DecodeString(v.granteePub) + granteePub, _ := crypto.DecompressPubkey(b) + publisherPrivate, _ := crypto.HexToECDSA(v.publisherPriv) + + ssKey, err := api.NewSessionKeyPK(publisherPrivate, granteePub, salt) + if err != nil { + t.Fatal(err) + } + + hasher := sha3.NewKeccak256() + hasher.Write(salt) + shared, err := hex.DecodeString(sharedSecret) + if err != nil { + t.Fatal(err) + } + hasher.Write(shared) + sum := hasher.Sum(nil) + + if !bytes.Equal(ssKey, sum) { + t.Fatalf("%d: got a session key mismatch", i) + } + } +} diff --git a/cmd/swarm/bootnodes.go b/cmd/swarm/bootnodes.go new file mode 100644 index 000000000000..cbba9970da0e --- /dev/null +++ b/cmd/swarm/bootnodes.go @@ -0,0 +1,77 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package main + +var SwarmBootnodes = []string{ + // Foundation Swarm Gateway Cluster + "enode://e5c6f9215c919a5450a7b8c14c22535607b69f2c8e1e7f6f430cb25d7a2c27cd1df4c4f18ad7c1d7e5162e271ffcd3f20b1a1467fb6e790e7d727f3b2193de97@52.232.7.187:30399", + "enode://9b2fe07e69ccc7db5fef15793dab7d7d2e697ed92132d6e9548218e68a34613a8671ad03a6658d862b468ed693cae8a0f8f8d37274e4a657ffb59ca84676e45b@52.232.7.187:30400", + "enode://76c1059162c93ef9df0f01097c824d17c492634df211ef4c806935b349082233b63b90c23970254b3b7138d630400f7cf9b71e80355a446a8b733296cb04169a@52.232.7.187:30401", + "enode://ce46bbe2a8263145d65252d52da06e000ad350ed09c876a71ea9544efa42f63c1e1b6cc56307373aaad8f9dd069c90d0ed2dd1530106200e16f4ca681dd8ae2d@52.232.7.187:30402", + "enode://f431e0d6008a6c35c6e670373d828390c8323e53da8158e7bfc43cf07e632cc9e472188be8df01decadea2d4a068f1428caba769b632554a8fb0607bc296988f@52.232.7.187:30403", + "enode://174720abfff83d7392f121108ae50ea54e04889afe020df883655c0f6cb95414db945a0228d8982fe000d86fc9f4b7669161adc89cd7cd56f78f01489ab2b99b@52.232.7.187:30404", + "enode://2ae89be4be61a689b6f9ecee4360a59e185e010ab750f14b63b4ae43d4180e872e18e3437d4386ce44875dc7cc6eb761acba06412fe3178f3dac1dab3b65703e@52.232.7.187:30405", + "enode://24abebe1c0e6d75d6052ce3219a87be8573fd6397b4cb51f0773b83abba9b3d872bfb273cdc07389715b87adfac02f5235f5241442c5089802cbd8d42e310fce@52.232.7.187:30406", + "enode://d08dfa46bfbbdbcaafbb6e34abee4786610f6c91e0b76d7881f0334ac10dda41d8c1f2b6eedffb4493293c335c0ad46776443b2208d1fbbb9e1a90b25ee4eef2@52.232.7.187:30407", + "enode://8d95eb0f837d27581a43668ed3b8783d69dc4e84aa3edd7a0897e026155c8f59c8702fdc0375ee7bac15757c9c78e1315d9b73e4ce59c936db52ea4ae2f501c7@52.232.7.187:30408", + "enode://a5967cc804aebd422baaaba9f06f27c9e695ccab335b61088130f8cbe64e3cdf78793868c7051dfc06eecfe844fad54bc7f6dfaed9db3c7ecef279cb829c25fb@52.232.7.187:30409", + "enode://5f00134d81a8f2ebcc46f8766f627f492893eda48138f811b7de2168308171968f01710bca6da05764e74f14bae41652f554e6321f1aed85fa3461e89d075dbf@52.232.7.187:30410", + "enode://b2142b79b01a5aa66a5e23cc35e78219a8e97bc2412a6698cee24ae02e87078b725d71730711bd62e25ff1aa8658c6633778af8ac14c63814a337c3dd0ebda9f@52.232.7.187:30411", + "enode://1ffa7651094867d6486ce3ef46d27a052c2cb968b618346c6df7040322c7efc3337547ba85d4cbba32e8b31c42c867202554735c06d4c664b9afada2ed0c4b3c@52.232.7.187:30412", + "enode://129e0c3d5f5df12273754f6f703d2424409fa4baa599e0b758c55600169313887855e75b082028d2302ec034b303898cd697cc7ae8256ba924ce927510da2c8d@52.232.7.187:30413", + "enode://419e2dc0d2f5b022cf16b0e28842658284909fa027a0fbbb5e2b755e7f846ea02a8f0b66a7534981edf6a7bcf8a14855344c6668e2cd4476ccd35a11537c9144@52.232.7.187:30414", + "enode://23d55ad900583231b91f2f62e3f72eb498b342afd58b682be3af052eed62b5651094471065981de33d8786f075f05e3cca499503b0ac8ae84b2a06e99f5b0723@52.232.7.187:30415", + "enode://bc56e4158c00e9f616d7ea533def20a89bef959df4e62a768ff238ff4e1e9223f57ecff969941c20921bad98749baae311c0fbebce53bf7bbb9d3dc903640990@52.232.7.187:30416", + "enode://433ce15199c409875e7e72fffd69fdafe746f17b20f0d5555281722a65fde6c80328fab600d37d8624509adc072c445ce0dad4a1c01cff6acf3132c11d429d4d@52.232.7.187:30417", + "enode://632ee95b8f0eac51ef89ceb29313fef3a60050181d66a6b125583b1a225a7694b252edc016efb58aa3b251da756cb73280842a022c658ed405223b2f58626343@52.232.7.187:30418", + "enode://4a0f9bcff7a4b9ee453fb298d0fb222592efe121512e30cd72fef631beb8c6a15153a1456eb073ee18551c0e003c569651a101892dc4124e90b933733a498bb5@52.232.7.187:30419", + 
"enode://f0d80fbc72d16df30e19aac3051eb56a7aff0c8367686702e01ea132d8b0b3ee00cadd6a859d2cca98ec68d3d574f8a8a87dba2347ec1e2818dc84bc3fa34fae@52.232.7.187:30420", + "enode://a199146906e4f9f2b94b195a8308d9a59a3564b92efaab898a4243fe4c2ad918b7a8e4853d9d901d94fad878270a2669d644591299c3d43de1b298c00b92b4a7@52.232.7.187:30421", + "enode://052036ea8736b37adbfb684d90ce43e11b3591b51f31489d7c726b03618dea4f73b1e659deb928e6bf40564edcdcf08351643f42db3d4ca1c2b5db95dad59e94@52.232.7.187:30422", + "enode://460e2b8c6da8f12fac96c836e7d108f4b7ec55a1c64631bb8992339e117e1c28328fee83af863196e20af1487a655d13e5ceba90e980e92502d5bac5834c1f71@52.232.7.187:30423", + "enode://6d2cdd13741b2e72e9031e1b93c6d9a4e68de2844aa4e939f6a8a8498a7c1d7e2ee4c64217e92a6df08c9a32c6764d173552810ef1bd2ecb356532d389dd2136@52.232.7.187:30424", + "enode://62105fc25ce2cd5b299647f47eaa9211502dc76f0e9f461df915782df7242ac3223e3db04356ae6ed2977ccac20f0b16864406e9ca514a40a004cb6a5d0402aa@52.232.7.187:30425", + "enode://e0e388fc520fd493c33f0ce16685e6f98fb6aec28f2edc14ee6b179594ee519a896425b0025bb6f0e182dd3e468443f19c70885fbc66560d000093a668a86aa8@52.232.7.187:30426", + "enode://63f3353a72521ea10022127a4fe6b4acbef197c3fe668fd9f4805542d8a6fcf79f6335fbab62d180a35e19b739483e740858b113fdd7c13a26ad7b4e318a5aef@52.232.7.187:30427", + "enode://33a42b927085678d4aefd4e70b861cfca6ef5f6c143696c4f755973fd29e64c9e658cad57a66a687a7a156da1e3688b1fbdd17bececff2ee009fff038fa5666b@52.232.7.187:30428", + "enode://259ab5ab5c1daee3eab7e3819ab3177b82d25c29e6c2444fdd3f956e356afae79a72840ccf2d0665fe82c81ebc3b3734da1178ac9fd5d62c67e674b69f86b6be@52.232.7.187:30429", + "enode://558bccad7445ce3fd8db116ed6ab4aed1324fdbdac2348417340c1764dc46d46bffe0728e5b7d5c36f12e794c289f18f57f08f085d2c65c9910a5c7a65b6a66a@52.232.7.187:30430", + "enode://abe60937a0657ffded718e3f84a32987286983be257bdd6004775c4b525747c2b598f4fac49c8de324de5ce75b22673fa541a7ce2d555fb7f8ca325744ae3577@52.232.7.187:30431", + "enode://bce6f0aaa5b230742680084df71d4f026b3eff7f564265599216a1b06b765303fdc9325de30ffd5dfdaf302ce4b14322891d2faea50ce2ca298d7409f5858339@52.232.7.187:30432", + "enode://21b957c4e03277d42be6660730ec1b93f540764f26c6abdb54d006611139c7081248486206dfbf64fcaffd62589e9c6b8ea77a5297e4b21a605f1bcf49483ed0@52.232.7.187:30433", + "enode://ff104e30e64f24c3d7328acee8b13354e5551bc8d60bb25ecbd9632d955c7e34bb2d969482d173355baad91c8282f8b592624eb3929151090da3b4448d4d58fb@52.232.7.187:30434", + "enode://c76e2b5f81a521bceaec1518926a21380a345df9cf463461562c6845795512497fb67679e155fc96a74350f8b78de8f4c135dd52b106dbbb9795452021d09ea5@52.232.7.187:30435", + "enode://3288fd860105164f3e9b69934c4eb18f7146cfab31b5a671f994e21a36e9287766e5f9f075aefbc404538c77f7c2eb2a4495020a7633a1c3970d94e9fa770aeb@52.232.7.187:30436", + "enode://6cea859c7396d46b20cfcaa80f9a11cd112f8684f2f782f7b4c0e1e0af9212113429522075101923b9b957603e6c32095a6a07b5e5e35183c521952ee108dfaf@52.232.7.187:30437", + "enode://f628ec56e4ca8317cc24cc4ac9b27b95edcce7b96e1c7f3b53e30de4a8580fe44f2f0694a513bdb0a431acaf2824074d6ace4690247bbc34c14f426af8c056ea@52.232.7.187:30438", + "enode://055ec8b26fc105c4f97970a1cce9773a5e34c03f511b839db742198a1c571e292c54aa799e9afb991cc8a560529b8cdf3e0c344bc6c282aff2f68eec59361ddf@52.232.7.187:30439", + "enode://48cb0d430c328974226aa33a931d8446cd5a8d40f3ead8f4ce7ad60faa1278192eb6d58bed91258d63e81f255fc107eec2425ce2ae8b22350dd556076e160610@52.232.7.187:30440", + "enode://3fadb7af7f770d5ffc6b073b8d42834bebb18ce1fe8a4fe270d2b799e7051327093960dc61d9a18870db288f7746a0e6ea2a013cd6ab0e5f97ca08199473aace@52.232.7.187:30441", + 
"enode://a5d7168024c9992769cf380ffa559a64b4f39a29d468f579559863814eb0ae0ed689ac0871a3a2b4c78b03297485ec322d578281131ef5d5c09a4beb6200a97a@52.232.7.187:30442", + "enode://9c57744c5b2c2d71abcbe80512652f9234d4ab041b768a2a886ab390fe6f184860f40e113290698652d7e20a8ac74d27ac8671db23eb475b6c5e6253e4693bf8@52.232.7.187:30443", + "enode://daca9ff0c3176045a0e0ed228dee00ec86bc0939b135dc6b1caa23745d20fd0332e1ee74ad04020e89df56c7146d831a91b89d15ca3df05ba7618769fefab376@52.232.7.187:30444", + "enode://a3f6af59428cb4b9acb198db15ef5554fa43c2b0c18e468a269722d64a27218963a2975eaf82750b6262e42192b5e3669ea51337b4cda62b33987981bc5e0c1a@52.232.7.187:30445", + "enode://fe571422fa4651c3354c85dac61911a6a6520dd3c0332967a49d4133ca30e16a8a4946fa73ca2cb5de77917ea701a905e1c3015b2f4defcd53132b61cc84127a@52.232.7.187:30446", + + // Mainframe + "enode://ee9a5a571ea6c8a59f9a8bb2c569c865e922b41c91d09b942e8c1d4dd2e1725bd2c26149da14de1f6321a2c6fdf1e07c503c3e093fb61696daebf74d6acd916b@54.186.219.160:30399", + "enode://a03f0562ecb8a992ad5242345535e73483cdc18ab934d36bf24b567d43447c2cea68f89f1d51d504dd13acc30f24ebce5a150bea2ccb1b722122ce4271dc199d@52.67.248.147:30399", + "enode://e2cbf9eafd85903d3b1c56743035284320695e0072bc8d7396e0542aa5e1c321b236f67eab66b79c2f15d4447fa4bbe74dd67d0467da23e7eb829f60ec8a812b@13.58.169.1:30399", + "enode://8b8c6bda6047f1cad9fab2db4d3d02b7aa26279902c32879f7bcd4a7d189fee77fdc36ee151ce6b84279b4792e72578fd529d2274d014132465758fbfee51cee@13.209.13.15:30399", + "enode://63f6a8818927e429585287cf2ca0cb9b11fa990b7b9b331c2962cdc6f21807a2473b26e8256225c26caff70d7218e59586d704d49061452c6852e382c885d03c@35.154.106.174:30399", + "enode://ed4bd3b794ed73f18e6dcc70c6624dfec63b5654f6ab54e8f40b16eff8afbd342d4230e099ddea40e84423f81b2d2ea79799dc345257b1fec6f6c422c9d008f7@52.213.20.99:30399", +} diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go index 64c37a0b5ef9..ae4b5816e60e 100644 --- a/cmd/swarm/config.go +++ b/cmd/swarm/config.go @@ -38,8 +38,6 @@ import ( bzzapi "github.com/ethereum/go-ethereum/swarm/api" ) -const SWARM_VERSION = "0.3" - var ( //flag definition for the dumpconfig command DumpConfigCommand = cli.Command{ @@ -70,6 +68,7 @@ const ( SWARM_ENV_SWAP_API = "SWARM_SWAP_API" SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE" SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY" + SWARM_ENV_LIGHT_NODE_ENABLE = "SWARM_LIGHT_NODE_ENABLE" SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK" SWARM_ENV_ENS_API = "SWARM_ENS_API" SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR" @@ -79,6 +78,7 @@ const ( SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH" SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY" SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY" + SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD" GETH_ENV_DATADIR = "GETH_DATADIR" ) @@ -133,7 +133,7 @@ func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) { log.Debug(printConfig(config)) } -//override the current config with whatever is in the config file, if a config file has been provided +//configFileOverride overrides the current config with the config file, if a config file has been provided func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config, error) { var err error @@ -143,7 +143,8 @@ func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config if filepath = ctx.GlobalString(SwarmTomlConfigPathFlag.Name); filepath == "" { utils.Fatalf("Config file flag provided with invalid file path") } - f, err := os.Open(filepath) + var f *os.File + f, err = os.Open(filepath) if err != 
nil { return nil, err } @@ -206,6 +207,10 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con currentConfig.SyncUpdateDelay = d } + if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) { + currentConfig.LightNodeEnabled = true + } + if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) { currentConfig.DeliverySkipCheck = true } @@ -228,10 +233,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con currentConfig.Cors = cors } - if ctx.GlobalIsSet(utils.BootnodesFlag.Name) { - currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name) - } - if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" { currentConfig.LocalStoreParams.ChunkDbPath = storePath } @@ -303,6 +304,12 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) { } } + if lne := os.Getenv(SWARM_ENV_LIGHT_NODE_ENABLE); lne != "" { + if lightnode, err := strconv.ParseBool(lne); err == nil { + currentConfig.LightNodeEnabled = lightnode + } + } + if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" { currentConfig.SwapAPI = swapapi } @@ -323,10 +330,6 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) { currentConfig.Cors = cors } - if bootnodes := os.Getenv(SWARM_ENV_BOOTNODES); bootnodes != "" { - currentConfig.BootNodes = bootnodes - } - return currentConfig } diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go index d5011e3a7069..02198f878ec4 100644 --- a/cmd/swarm/config_test.go +++ b/cmd/swarm/config_test.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "io/ioutil" + "net" "os" "os/exec" "testing" @@ -559,3 +560,16 @@ func TestValidateConfig(t *testing.T) { } } } + +func assignTCPPort() (string, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return "", err + } + l.Close() + _, port, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + return "", err + } + return port, nil +} diff --git a/cmd/swarm/download.go b/cmd/swarm/download.go index c2418f744c84..91bc2c93abc3 100644 --- a/cmd/swarm/download.go +++ b/cmd/swarm/download.go @@ -68,18 +68,36 @@ func download(ctx *cli.Context) { utils.Fatalf("could not parse uri argument: %v", err) } - // assume behaviour according to --recursive switch - if isRecursive { - if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil { - utils.Fatalf("encoutered an error while downloading directory: %v", err) - } - } else { - // we are downloading a file - log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path)) + dl := func(credentials string) error { + // assume behaviour according to --recursive switch + if isRecursive { + if err := client.DownloadDirectory(uri.Addr, uri.Path, dest, credentials); err != nil { + if err == swarm.ErrUnauthorized { + return err + } + return fmt.Errorf("directory %s: %v", uri.Path, err) + } + } else { + // we are downloading a file + log.Debug("downloading file/path from a manifest", "uri.Addr", uri.Addr, "uri.Path", uri.Path) - err := client.DownloadFile(uri.Addr, uri.Path, dest) - if err != nil { - utils.Fatalf("could not download %s from given address: %s.
error: %v", uri.Path, uri.Addr, err) + err := client.DownloadFile(uri.Addr, uri.Path, dest, credentials) + if err != nil { + if err == swarm.ErrUnauthorized { + return err + } + return fmt.Errorf("file %s from address: %s: %v", uri.Path, uri.Addr, err) + } } + return nil + } + if passwords := makePasswordList(ctx); passwords != nil { + password := getPassPhrase(fmt.Sprintf("Downloading %s is restricted", uri), 0, passwords) + err = dl(password) + } else { + err = dl("") + } + if err != nil { + utils.Fatalf("download: %v", err) } } diff --git a/cmd/swarm/fs.go b/cmd/swarm/fs.go index 0124586cfe19..3dc38ca4daca 100644 --- a/cmd/swarm/fs.go +++ b/cmd/swarm/fs.go @@ -92,7 +92,7 @@ func listMounts(cliContext *cli.Context) { mf := []fuse.MountInfo{} err = client.CallContext(ctx, &mf, "swarmfs_listmounts") if err != nil { - utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err) + utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err) } if len(mf) == 0 { fmt.Print("Could not found any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n") diff --git a/cmd/swarm/fs_test.go b/cmd/swarm/fs_test.go index 25705c0a49f9..4f38b094bbf2 100644 --- a/cmd/swarm/fs_test.go +++ b/cmd/swarm/fs_test.go @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see . +// +build linux freebsd + package main import ( @@ -41,6 +43,11 @@ type testFile struct { } // TestCLISwarmFs is a high-level test of swarmfs +// +// This test fails on travis for macOS as this executable exits with code 1 +// and without any log messages in the log: +// /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse. +// This is the reason for this file not being built on darwin architecture. 
func TestCLISwarmFs(t *testing.T) { cluster := newTestCluster(t, 3) defer cluster.Shutdown() diff --git a/cmd/swarm/hash.go b/cmd/swarm/hash.go index c82456b3cdda..bca4955b158d 100644 --- a/cmd/swarm/hash.go +++ b/cmd/swarm/hash.go @@ -18,6 +18,7 @@ package main import ( + "context" "fmt" "os" @@ -39,7 +40,7 @@ func hash(ctx *cli.Context) { stat, _ := f.Stat() fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams()) - addr, _, err := fileStore.Store(f, stat.Size(), false) + addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false) if err != nil { utils.Fatalf("%v\n", err) } else { diff --git a/cmd/swarm/list.go b/cmd/swarm/list.go index 57b5517c6ef5..01b3f4ab6c5b 100644 --- a/cmd/swarm/list.go +++ b/cmd/swarm/list.go @@ -44,7 +44,7 @@ func list(ctx *cli.Context) { bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") client := swarm.NewClient(bzzapi) - list, err := client.List(manifest, prefix) + list, err := client.List(manifest, prefix, "") if err != nil { utils.Fatalf("Failed to generate file and directory list: %s", err) } diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index 9877e9150d47..637ae06e9637 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -37,12 +37,12 @@ import ( "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/swarm" bzzapi "github.com/ethereum/go-ethereum/swarm/api" swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics" + "github.com/ethereum/go-ethereum/swarm/tracing" + sv "github.com/ethereum/go-ethereum/swarm/version" "gopkg.in/urfave/cli.v1" ) @@ -66,14 +66,7 @@ OPTIONS: ` var ( - gitCommit string // Git SHA1 commit hash of the release (set via linker flags) - testbetBootNodes = []string{ - "enode://ec8ae764f7cb0417bdfb009b9d0f18ab3818a3a4e8e7c67dd5f18971a93510a2e6f43cd0b69a27e439a9629457ea804104f37c85e41eed057d3faabbf7744cdf@13.74.157.139:30429", - "enode://c2e1fceb3bf3be19dff71eec6cccf19f2dbf7567ee017d130240c670be8594bc9163353ca55dd8df7a4f161dd94b36d0615c17418b5a3cdcbb4e9d99dfa4de37@13.74.157.139:30430", - "enode://fe29b82319b734ce1ec68b84657d57145fee237387e63273989d354486731e59f78858e452ef800a020559da22dcca759536e6aa5517c53930d29ce0b1029286@13.74.157.139:30431", - "enode://1d7187e7bde45cf0bee489ce9852dd6d1a0d9aa67a33a6b8e6db8a4fbc6fcfa6f0f1a5419343671521b863b187d1c73bad3603bae66421d157ffef357669ddb8@13.74.157.139:30432", - "enode://0e4cba800f7b1ee73673afa6a4acead4018f0149d2e3216be3f133318fd165b324cd71b81fbe1e80deac8dbf56e57a49db7be67f8b9bc81bd2b7ee496434fb5d@13.74.157.139:30433", - } + gitCommit string // Git SHA1 commit hash of the release (set via linker flags) ) var ( @@ -122,6 +115,11 @@ var ( Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)", EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY, } + SwarmLightNodeEnabled = cli.BoolFlag{ + Name: "lightnode", + Usage: "Enable Swarm LightNode (default false)", + EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE, + } SwarmDeliverySkipCheckFlag = cli.BoolFlag{ Name: "delivery-skip-check", Usage: "Skip chunk delivery check (default false)", @@ -143,24 +141,41 @@ var ( } SwarmWantManifestFlag = cli.BoolTFlag{ Name: "manifest", - Usage: "Automatic manifest upload", + Usage: "Automatic manifest upload 
(default true)", } SwarmUploadDefaultPath = cli.StringFlag{ Name: "defaultpath", Usage: "path to file served for empty url path (none)", } + SwarmAccessGrantKeyFlag = cli.StringFlag{ + Name: "grant-key", + Usage: "grants a given public key access to an ACT", + } + SwarmAccessGrantKeysFlag = cli.StringFlag{ + Name: "grant-keys", + Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT", + } SwarmUpFromStdinFlag = cli.BoolFlag{ Name: "stdin", Usage: "reads data to be uploaded from stdin", } SwarmUploadMimeType = cli.StringFlag{ Name: "mime", - Usage: "force mime type", + Usage: "Manually specify MIME type", } SwarmEncryptedFlag = cli.BoolFlag{ Name: "encrypt", Usage: "use encrypted upload", } + SwarmAccessPasswordFlag = cli.StringFlag{ + Name: "password", + Usage: "Password", + EnvVar: SWARM_ACCESS_PASSWORD, + } + SwarmDryRunFlag = cli.BoolFlag{ + Name: "dry-run", + Usage: "dry-run", + } CorsStringFlag = cli.StringFlag{ Name: "corsdomain", Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')", @@ -181,6 +196,18 @@ var ( Usage: "Number of recent chunks cached in memory (default 5000)", EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY, } + SwarmResourceMultihashFlag = cli.BoolFlag{ + Name: "multihash", + Usage: "Determines how to interpret data for a resource update. If not present, data will be interpreted as raw, literal data that will be included in the resource", + } + SwarmResourceNameFlag = cli.StringFlag{ + Name: "name", + Usage: "User-defined name for the new resource", + } + SwarmResourceDataOnCreateFlag = cli.StringFlag{ + Name: "data", + Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x", + } ) //declare a few constant error messages, useful for later error check comparisons in test @@ -189,12 +216,21 @@ var ( SWARM_ERR_SWAP_SET_NO_API = "SWAP is enabled but --swap-api is not set" ) +// this help command gets added to any subcommand that does not define it explicitly +var defaultSubcommandHelp = cli.Command{ + Action: func(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "", 1) }, + CustomHelpTemplate: helpTemplate, + Name: "help", + Usage: "shows this help", + Hidden: true, +} + var defaultNodeConfig = node.DefaultConfig // This init function sets defaults so cmd/swarm can run alongside geth. func init() { defaultNodeConfig.Name = clientIdentifier - defaultNodeConfig.Version = params.VersionWithCommit(gitCommit) + defaultNodeConfig.Version = sv.VersionWithCommit(gitCommit) defaultNodeConfig.P2P.ListenAddr = ":30399" defaultNodeConfig.IPCPath = "bzzd.ipc" // Set flag defaults for --help display. 
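The new --lightnode switch is also driven by the SWARM_ENV_LIGHT_NODE_ENABLE environment variable; envVarsOverride in config.go (earlier in this diff) feeds it through strconv.ParseBool, but as written there the parsed value is applied only when ParseBool returns an error. The conventional pattern applies the value on success, as in this minimal standalone sketch; the helper name boolFromEnv and the literal env-var name in main are illustrative and not part of the change.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// boolFromEnv resolves an environment-variable-backed boolean option such as
// the light node enable switch: the parsed value is used only when
// strconv.ParseBool succeeds, otherwise the supplied default is kept.
func boolFromEnv(name string, def bool) bool {
	if v := os.Getenv(name); v != "" {
		if b, err := strconv.ParseBool(v); err == nil {
			return b
		}
	}
	return def
}

func main() {
	// "SWARM_LIGHT_NODE_ENABLE" is a placeholder for the real variable name.
	fmt.Println(boolFromEnv("SWARM_LIGHT_NODE_ENABLE", false))
}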
@@ -225,6 +261,96 @@ func init() { Flags: []cli.Flag{SwarmEncryptedFlag}, Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash", }, + { + CustomHelpTemplate: helpTemplate, + Name: "access", + Usage: "encrypts a reference and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root manifest", + Subcommands: []cli.Command{ + { + CustomHelpTemplate: helpTemplate, + Name: "new", + Usage: "encrypts a reference and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + Subcommands: []cli.Command{ + { + Action: accessNewPass, + CustomHelpTemplate: helpTemplate, + Flags: []cli.Flag{ + utils.PasswordFileFlag, + SwarmDryRunFlag, + }, + Name: "pass", + Usage: "encrypts a reference with a password and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + }, + { + Action: accessNewPK, + CustomHelpTemplate: helpTemplate, + Flags: []cli.Flag{ + utils.PasswordFileFlag, + SwarmDryRunFlag, + SwarmAccessGrantKeyFlag, + }, + Name: "pk", + Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + }, + { + Action: accessNewACT, + CustomHelpTemplate: helpTemplate, + Flags: []cli.Flag{ + SwarmAccessGrantKeysFlag, + SwarmDryRunFlag, + }, + Name: "act", + Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + }, + }, + }, + }, + }, + { + CustomHelpTemplate: helpTemplate, + Name: "resource", + Usage: "(Advanced) Create and update Mutable Resources", + ArgsUsage: "", + Description: "Works with Mutable Resource Updates", + Subcommands: []cli.Command{ + { + Action: resourceCreate, + CustomHelpTemplate: helpTemplate, + Name: "create", + Usage: "creates a new Mutable Resource", + ArgsUsage: "", + Description: "creates a new Mutable Resource", + Flags: []cli.Flag{SwarmResourceNameFlag, SwarmResourceDataOnCreateFlag, SwarmResourceMultihashFlag}, + }, + { + Action: resourceUpdate, + CustomHelpTemplate: helpTemplate, + Name: "update", + Usage: "updates the content of an existing Mutable Resource", + ArgsUsage: " <0x Hex data>", + Description: "updates the content of an existing Mutable Resource", + Flags: []cli.Flag{SwarmResourceMultihashFlag}, + }, + { + Action: resourceInfo, + CustomHelpTemplate: helpTemplate, + Name: "info", + Usage: "obtains information about an existing Mutable Resource", + ArgsUsage: "", + Description: "obtains information about an existing Mutable Resource", + }, + }, + }, { Action: list, CustomHelpTemplate: helpTemplate, @@ -242,16 +368,13 @@ func init() { Description: "Prints the swarm hash of file or directory", }, { - Action: download, - Name: "down", - Flags: []cli.Flag{SwarmRecursiveFlag}, - Usage: "downloads a swarm manifest or a file inside a manifest", - ArgsUsage: " []", - Description: ` -Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries. 
-`, + Action: download, + Name: "down", + Flags: []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag}, + Usage: "downloads a swarm manifest or a file inside a manifest", + ArgsUsage: " []", + Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.`, }, - { Name: "manifest", CustomHelpTemplate: helpTemplate, @@ -260,23 +383,23 @@ Downloads a swarm bzz uri to the given dir. When no dir is provided, working dir Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove", Subcommands: []cli.Command{ { - Action: add, + Action: manifestAdd, CustomHelpTemplate: helpTemplate, Name: "add", Usage: "add a new path to the manifest", - ArgsUsage: " []", + ArgsUsage: " ", Description: "Adds a new path to the manifest", }, { - Action: update, + Action: manifestUpdate, CustomHelpTemplate: helpTemplate, Name: "update", Usage: "update the hash for an already existing path in the manifest", - ArgsUsage: " []", + ArgsUsage: " ", Description: "Update the hash for an already existing path in the manifest", }, { - Action: remove, + Action: manifestRemove, CustomHelpTemplate: helpTemplate, Name: "remove", Usage: "removes a path from the manifest", @@ -351,16 +474,14 @@ pv(1) tool to get a progress bar: Name: "import", Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", ArgsUsage: " ", - Description: ` -Import chunks from a tar archive into a local chunk database (use - to read from stdin). + Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin). swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar The import may be quite large, consider piping the input through the Unix pv(1) tool to get a progress bar: - pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks - -`, + pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`, }, { Action: dbClean, @@ -376,6 +497,11 @@ pv(1) tool to get a progress bar: // See config.go DumpConfigCommand, } + + // append a hidden help subcommand to all commands that have subcommands + // if a help command was already defined above, that one will take precedence. + addDefaultHelpSubcommands(app.Commands) + sort.Sort(cli.CommandsByName(app.Commands)) app.Flags = []cli.Flag{ @@ -402,6 +528,7 @@ pv(1) tool to get a progress bar: SwarmSwapAPIFlag, SwarmSyncDisabledFlag, SwarmSyncUpdateDelay, + SwarmLightNodeEnabled, SwarmDeliverySkipCheckFlag, SwarmListenAddrFlag, SwarmPortFlag, @@ -430,12 +557,14 @@ pv(1) tool to get a progress bar: app.Flags = append(app.Flags, rpcFlags...) app.Flags = append(app.Flags, debug.Flags...) app.Flags = append(app.Flags, swarmmetrics.Flags...) + app.Flags = append(app.Flags, tracing.Flags...) 
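The init function above appends a hidden default help subcommand to every command that has subcommands (addDefaultHelpSubcommands, defined further down in this diff). Below is a minimal standalone sketch of that pattern with illustrative names (hiddenHelp, appendHiddenHelp) rather than the PR's; as the comment above notes, a help subcommand a command already defines comes earlier in the slice and therefore takes precedence over the appended default.

package main

import (
	"os"

	"gopkg.in/urfave/cli.v1"
)

var hiddenHelp = cli.Command{
	Name:   "help",
	Usage:  "shows this help",
	Hidden: true,
	Action: func(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "", 1) },
}

// appendHiddenHelp walks the command tree and adds the hidden help entry to
// every command that has subcommands, recursing into nested subcommands.
func appendHiddenHelp(cmds []cli.Command) {
	for i := range cmds {
		if len(cmds[i].Subcommands) > 0 {
			cmds[i].Subcommands = append(cmds[i].Subcommands, hiddenHelp)
			appendHiddenHelp(cmds[i].Subcommands)
		}
	}
}

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name: "manifest",
			Subcommands: []cli.Command{
				{Name: "add"}, {Name: "update"}, {Name: "remove"},
			},
		},
	}
	appendHiddenHelp(app.Commands)
	app.Run(os.Args)
}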
app.Before = func(ctx *cli.Context) error { runtime.GOMAXPROCS(runtime.NumCPU()) - if err := debug.Setup(ctx); err != nil { + if err := debug.Setup(ctx, ""); err != nil { return err } swarmmetrics.Setup(ctx) + tracing.Setup(ctx) return nil } app.After = func(ctx *cli.Context) error { @@ -452,7 +581,8 @@ func main() { } func version(ctx *cli.Context) error { - fmt.Println("Version:", SWARM_VERSION) + fmt.Println(strings.Title(clientIdentifier)) + fmt.Println("Version:", sv.VersionWithMeta) if gitCommit != "" { fmt.Println("Git Commit:", gitCommit) } @@ -464,6 +594,7 @@ func version(ctx *cli.Context) error { func bzzd(ctx *cli.Context) error { //build a valid bzzapi.Config from all available sources: //default config, file config, command line and env vars + bzzconfig, err := buildConfig(ctx) if err != nil { utils.Fatalf("unable to configure swarm: %v", err) @@ -480,12 +611,16 @@ func bzzd(ctx *cli.Context) error { if _, err := os.Stat(bzzconfig.Path); err == nil { cfg.DataDir = bzzconfig.Path } + + //optionally set the bootnodes before configuring the node + setSwarmBootstrapNodes(ctx, &cfg) //setup the ethereum node utils.SetNodeConfig(ctx, &cfg) stack, err := node.New(&cfg) if err != nil { utils.Fatalf("can't create node: %v", err) } + //a few steps need to be done after the config phase is completed, //due to overriding behavior initSwarmNode(bzzconfig, stack, ctx) @@ -503,16 +638,6 @@ func bzzd(ctx *cli.Context) error { stack.Stop() }() - // Add bootnodes as initial peers. - if bzzconfig.BootNodes != "" { - bootnodes := strings.Split(bzzconfig.BootNodes, ",") - injectBootnodes(stack.Server(), bootnodes) - } else { - if bzzconfig.NetworkID == 3 { - injectBootnodes(stack.Server(), testbetBootNodes) - } - } - stack.Wait() return nil } @@ -546,6 +671,26 @@ func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.Pr return decryptStoreAccount(ks, bzzaccount, utils.MakePasswordList(ctx)) } +// getPrivKey returns the private key of the specified bzzaccount +// Used only by client commands, such as `resource` +func getPrivKey(ctx *cli.Context) *ecdsa.PrivateKey { + // booting up the swarm node just as we do in bzzd action + bzzconfig, err := buildConfig(ctx) + if err != nil { + utils.Fatalf("unable to configure swarm: %v", err) + } + cfg := defaultNodeConfig + if _, err := os.Stat(bzzconfig.Path); err == nil { + cfg.DataDir = bzzconfig.Path + } + utils.SetNodeConfig(ctx, &cfg) + stack, err := node.New(&cfg) + if err != nil { + utils.Fatalf("can't create node: %v", err) + } + return getAccount(bzzconfig.BzzAccount, ctx, stack) +} + func decryptStoreAccount(ks *keystore.KeyStore, account string, passwords []string) *ecdsa.PrivateKey { var a accounts.Account var err error @@ -600,13 +745,32 @@ func getPassPhrase(prompt string, i int, passwords []string) string { return password } -func injectBootnodes(srv *p2p.Server, nodes []string) { - for _, url := range nodes { - n, err := discover.ParseNode(url) +// addDefaultHelpSubcommand scans through defined CLI commands and adds +// a basic help subcommand to each +// if a help command is already defined, it will take precedence over the default. 
+func addDefaultHelpSubcommands(commands []cli.Command) { + for i := range commands { + cmd := &commands[i] + if cmd.Subcommands != nil { + cmd.Subcommands = append(cmd.Subcommands, defaultSubcommandHelp) + addDefaultHelpSubcommands(cmd.Subcommands) + } + } +} + +func setSwarmBootstrapNodes(ctx *cli.Context, cfg *node.Config) { + if ctx.GlobalIsSet(utils.BootnodesFlag.Name) || ctx.GlobalIsSet(utils.BootnodesV4Flag.Name) { + return + } + + cfg.P2P.BootstrapNodes = []*discover.Node{} + + for _, url := range SwarmBootnodes { + node, err := discover.ParseNode(url) if err != nil { - log.Error("Invalid swarm bootnode", "err", err) - continue + log.Error("Bootstrap URL invalid", "enode", url, "err", err) } - srv.AddPeer(n) + cfg.P2P.BootstrapNodes = append(cfg.P2P.BootstrapNodes, node) } + log.Debug("added default swarm bootnodes", "length", len(cfg.P2P.BootstrapNodes)) } diff --git a/cmd/swarm/manifest.go b/cmd/swarm/manifest.go index 82166edf6c80..0216ffc1dd70 100644 --- a/cmd/swarm/manifest.go +++ b/cmd/swarm/manifest.go @@ -18,10 +18,8 @@ package main import ( - "encoding/json" "fmt" - "mime" - "path/filepath" + "os" "strings" "github.com/ethereum/go-ethereum/cmd/utils" @@ -30,127 +28,118 @@ import ( "gopkg.in/urfave/cli.v1" ) -const bzzManifestJSON = "application/bzz-manifest+json" - -func add(ctx *cli.Context) { +// manifestAdd adds a new entry to the manifest at the given path. +// New entry hash, the last argument, must be the hash of a manifest +// with only one entry, which meta-data will be added to the original manifest. +// On success, this function will print new (updated) manifest's hash. +func manifestAdd(ctx *cli.Context) { args := ctx.Args() - if len(args) < 3 { - utils.Fatalf("Need at least three arguments []") + if len(args) != 3 { + utils.Fatalf("Need exactly three arguments ") } var ( mhash = args[0] path = args[1] hash = args[2] - - ctype string - wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) - mroot api.Manifest ) - if len(args) > 3 { - ctype = args[3] - } else { - ctype = mime.TypeByExtension(filepath.Ext(path)) + bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + client := swarm.NewClient(bzzapi) + + m, _, err := client.DownloadManifest(hash) + if err != nil { + utils.Fatalf("Error downloading manifest to add: %v", err) + } + l := len(m.Entries) + if l == 0 { + utils.Fatalf("No entries in manifest %s", hash) + } else if l > 1 { + utils.Fatalf("Too many entries in manifest %s", hash) } - newManifest := addEntryToManifest(ctx, mhash, path, hash, ctype) + newManifest := addEntryToManifest(client, mhash, path, m.Entries[0]) fmt.Println(newManifest) - - if !wantManifest { - // Print the manifest. This is the only output to stdout. - mrootJSON, _ := json.MarshalIndent(mroot, "", " ") - fmt.Println(string(mrootJSON)) - return - } } -func update(ctx *cli.Context) { - +// manifestUpdate replaces an existing entry of the manifest at the given path. +// New entry hash, the last argument, must be the hash of a manifest +// with only one entry, which meta-data will be added to the original manifest. +// On success, this function will print hash of the updated manifest. 
+func manifestUpdate(ctx *cli.Context) { args := ctx.Args() - if len(args) < 3 { - utils.Fatalf("Need at least three arguments ") + if len(args) != 3 { + utils.Fatalf("Need exactly three arguments ") } var ( mhash = args[0] path = args[1] hash = args[2] - - ctype string - wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) - mroot api.Manifest ) - if len(args) > 3 { - ctype = args[3] - } else { - ctype = mime.TypeByExtension(filepath.Ext(path)) - } - newManifest := updateEntryInManifest(ctx, mhash, path, hash, ctype) - fmt.Println(newManifest) + bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + client := swarm.NewClient(bzzapi) - if !wantManifest { - // Print the manifest. This is the only output to stdout. - mrootJSON, _ := json.MarshalIndent(mroot, "", " ") - fmt.Println(string(mrootJSON)) - return + m, _, err := client.DownloadManifest(hash) + if err != nil { + utils.Fatalf("Error downloading manifest to update: %v", err) + } + l := len(m.Entries) + if l == 0 { + utils.Fatalf("No entries in manifest %s", hash) + } else if l > 1 { + utils.Fatalf("Too many entries in manifest %s", hash) } + + newManifest, _, defaultEntryUpdated := updateEntryInManifest(client, mhash, path, m.Entries[0], true) + if defaultEntryUpdated { + // Print informational message to stderr + // allowing the user to get the new manifest hash from stdout + // without the need to parse the complete output. + fmt.Fprintln(os.Stderr, "Manifest default entry is updated, too") + } + fmt.Println(newManifest) } -func remove(ctx *cli.Context) { +// manifestRemove removes an existing entry of the manifest at the given path. +// On success, this function will print hash of the manifest which does not +// contain the path. +func manifestRemove(ctx *cli.Context) { args := ctx.Args() - if len(args) < 2 { - utils.Fatalf("Need at least two arguments ") + if len(args) != 2 { + utils.Fatalf("Need exactly two arguments ") } var ( mhash = args[0] path = args[1] - - wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) - mroot api.Manifest ) - newManifest := removeEntryFromManifest(ctx, mhash, path) - fmt.Println(newManifest) + bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + client := swarm.NewClient(bzzapi) - if !wantManifest { - // Print the manifest. This is the only output to stdout. 
- mrootJSON, _ := json.MarshalIndent(mroot, "", " ") - fmt.Println(string(mrootJSON)) - return - } + newManifest := removeEntryFromManifest(client, mhash, path) + fmt.Println(newManifest) } -func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string { - - var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - client = swarm.NewClient(bzzapi) - longestPathEntry = api.ManifestEntry{} - ) +func addEntryToManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry) string { + var longestPathEntry = api.ManifestEntry{} mroot, isEncrypted, err := client.DownloadManifest(mhash) if err != nil { utils.Fatalf("Manifest download failed: %v", err) } - //TODO: check if the "hash" to add is valid and present in swarm - _, _, err = client.DownloadManifest(hash) - if err != nil { - utils.Fatalf("Hash to add is not present: %v", err) - } - // See if we path is in this Manifest or do we have to dig deeper - for _, entry := range mroot.Entries { - if path == entry.Path { + for _, e := range mroot.Entries { + if path == e.Path { utils.Fatalf("Path %s already present, not adding anything", path) } else { - if entry.ContentType == bzzManifestJSON { - prfxlen := strings.HasPrefix(path, entry.Path) + if e.ContentType == api.ManifestType { + prfxlen := strings.HasPrefix(path, e.Path) if prfxlen && len(path) > len(longestPathEntry.Path) { - longestPathEntry = entry + longestPathEntry = e } } } @@ -159,25 +148,21 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin if longestPathEntry.Path != "" { // Load the child Manifest add the entry there newPath := path[len(longestPathEntry.Path):] - newHash := addEntryToManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype) + newHash := addEntryToManifest(client, longestPathEntry.Hash, newPath, entry) // Replace the hash for parent Manifests newMRoot := &api.Manifest{} - for _, entry := range mroot.Entries { - if longestPathEntry.Path == entry.Path { - entry.Hash = newHash + for _, e := range mroot.Entries { + if longestPathEntry.Path == e.Path { + e.Hash = newHash } - newMRoot.Entries = append(newMRoot.Entries, entry) + newMRoot.Entries = append(newMRoot.Entries, e) } mroot = newMRoot } else { // Add the entry in the leaf Manifest - newEntry := api.ManifestEntry{ - Hash: hash, - Path: path, - ContentType: ctype, - } - mroot.Entries = append(mroot.Entries, newEntry) + entry.Path = path + mroot.Entries = append(mroot.Entries, entry) } newManifestHash, err := client.UploadManifest(mroot, isEncrypted) @@ -185,14 +170,16 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin utils.Fatalf("Manifest upload failed: %v", err) } return newManifestHash - } -func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string { - +// updateEntryInManifest updates an existing entry o path with a new one in the manifest with provided mhash +// finding the path recursively through all nested manifests. Argument isRoot is used for default +// entry update detection. If the updated entry has the same hash as the default entry, then the +// default entry in root manifest will be updated too. +// Returned values are the new manifest hash, hash of the entry that was replaced by the new entry and +// a a bool that is true if default entry is updated. 
+func updateEntryInManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry, isRoot bool) (newManifestHash, oldHash string, defaultEntryUpdated bool) { var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - client = swarm.NewClient(bzzapi) newEntry = api.ManifestEntry{} longestPathEntry = api.ManifestEntry{} ) @@ -202,17 +189,18 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st utils.Fatalf("Manifest download failed: %v", err) } - //TODO: check if the "hash" with which to update is valid and present in swarm - // See if we path is in this Manifest or do we have to dig deeper - for _, entry := range mroot.Entries { - if path == entry.Path { - newEntry = entry + for _, e := range mroot.Entries { + if path == e.Path { + newEntry = e + // keep the reference of the hash of the entry that should be replaced + // for default entry detection + oldHash = e.Hash } else { - if entry.ContentType == bzzManifestJSON { - prfxlen := strings.HasPrefix(path, entry.Path) + if e.ContentType == api.ManifestType { + prfxlen := strings.HasPrefix(path, e.Path) if prfxlen && len(path) > len(longestPathEntry.Path) { - longestPathEntry = entry + longestPathEntry = e } } } @@ -225,50 +213,50 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st if longestPathEntry.Path != "" { // Load the child Manifest add the entry there newPath := path[len(longestPathEntry.Path):] - newHash := updateEntryInManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype) + var newHash string + newHash, oldHash, _ = updateEntryInManifest(client, longestPathEntry.Hash, newPath, entry, false) // Replace the hash for parent Manifests newMRoot := &api.Manifest{} - for _, entry := range mroot.Entries { - if longestPathEntry.Path == entry.Path { - entry.Hash = newHash + for _, e := range mroot.Entries { + if longestPathEntry.Path == e.Path { + e.Hash = newHash } - newMRoot.Entries = append(newMRoot.Entries, entry) + newMRoot.Entries = append(newMRoot.Entries, e) } mroot = newMRoot } - if newEntry.Path != "" { + // update the manifest if the new entry is found and + // check if default entry should be updated + if newEntry.Path != "" || isRoot { // Replace the hash for leaf Manifest newMRoot := &api.Manifest{} - for _, entry := range mroot.Entries { - if newEntry.Path == entry.Path { - myEntry := api.ManifestEntry{ - Hash: hash, - Path: entry.Path, - ContentType: ctype, - } - newMRoot.Entries = append(newMRoot.Entries, myEntry) - } else { + for _, e := range mroot.Entries { + if newEntry.Path == e.Path { + entry.Path = e.Path newMRoot.Entries = append(newMRoot.Entries, entry) + } else if isRoot && e.Path == "" && e.Hash == oldHash { + entry.Path = e.Path + newMRoot.Entries = append(newMRoot.Entries, entry) + defaultEntryUpdated = true + } else { + newMRoot.Entries = append(newMRoot.Entries, e) } } mroot = newMRoot } - newManifestHash, err := client.UploadManifest(mroot, isEncrypted) + newManifestHash, err = client.UploadManifest(mroot, isEncrypted) if err != nil { utils.Fatalf("Manifest upload failed: %v", err) } - return newManifestHash + return newManifestHash, oldHash, defaultEntryUpdated } -func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string { - +func removeEntryFromManifest(client *swarm.Client, mhash, path string) string { var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - client = swarm.NewClient(bzzapi) entryToRemove = api.ManifestEntry{} longestPathEntry = api.ManifestEntry{} ) @@ 
-283,7 +271,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string { if path == entry.Path { entryToRemove = entry } else { - if entry.ContentType == bzzManifestJSON { + if entry.ContentType == api.ManifestType { prfxlen := strings.HasPrefix(path, entry.Path) if prfxlen && len(path) > len(longestPathEntry.Path) { longestPathEntry = entry @@ -299,7 +287,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string { if longestPathEntry.Path != "" { // Load the child Manifest remove the entry there newPath := path[len(longestPathEntry.Path):] - newHash := removeEntryFromManifest(ctx, longestPathEntry.Hash, newPath) + newHash := removeEntryFromManifest(client, longestPathEntry.Hash, newPath) // Replace the hash for parent Manifests newMRoot := &api.Manifest{} diff --git a/cmd/swarm/manifest_test.go b/cmd/swarm/manifest_test.go new file mode 100644 index 000000000000..08fe0b2eb7ab --- /dev/null +++ b/cmd/swarm/manifest_test.go @@ -0,0 +1,579 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/ethereum/go-ethereum/swarm/api" + swarm "github.com/ethereum/go-ethereum/swarm/api/client" +) + +// TestManifestChange tests manifest add, update and remove +// cli commands without encryption. +func TestManifestChange(t *testing.T) { + testManifestChange(t, false) +} + +// TestManifestChange tests manifest add, update and remove +// cli commands with encryption enabled. +func TestManifestChangeEncrypted(t *testing.T) { + testManifestChange(t, true) +} + +// testManifestChange performs cli commands: +// - manifest add +// - manifest update +// - manifest remove +// on a manifest, testing the functionality of this +// comands on paths that are in root manifest or a nested one. +// Argument encrypt controls whether to use encryption or not. +func testManifestChange(t *testing.T, encrypt bool) { + t.Parallel() + cluster := newTestCluster(t, 1) + defer cluster.Shutdown() + + tmp, err := ioutil.TempDir("", "swarm-manifest-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + origDir := filepath.Join(tmp, "orig") + if err := os.Mkdir(origDir, 0777); err != nil { + t.Fatal(err) + } + + indexDataFilename := filepath.Join(origDir, "index.html") + err = ioutil.WriteFile(indexDataFilename, []byte("
<h1>Test</h1>
"), 0666) + if err != nil { + t.Fatal(err) + } + // Files paths robots.txt and robots.html share the same prefix "robots." + // which will result a manifest with a nested manifest under path "robots.". + // This will allow testing manifest changes on both root and nested manifest. + err = ioutil.WriteFile(filepath.Join(origDir, "robots.txt"), []byte("Disallow: /"), 0666) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(filepath.Join(origDir, "robots.html"), []byte("No Robots Allowed"), 0666) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(filepath.Join(origDir, "mutants.txt"), []byte("Frank\nMarcus"), 0666) + if err != nil { + t.Fatal(err) + } + + args := []string{ + "--bzzapi", + cluster.Nodes[0].URL, + "--recursive", + "--defaultpath", + indexDataFilename, + "up", + origDir, + } + if encrypt { + args = append(args, "--encrypt") + } + + origManifestHash := runSwarmExpectHash(t, args...) + + checkHashLength(t, origManifestHash, encrypt) + + client := swarm.NewClient(cluster.Nodes[0].URL) + + // upload a new file and use its manifest to add it the original manifest. + t.Run("add", func(t *testing.T) { + humansData := []byte("Ann\nBob") + humansDataFilename := filepath.Join(tmp, "humans.txt") + err = ioutil.WriteFile(humansDataFilename, humansData, 0666) + if err != nil { + t.Fatal(err) + } + + humansManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "up", + humansDataFilename, + ) + + newManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "manifest", + "add", + origManifestHash, + "humans.txt", + humansManifestHash, + ) + + checkHashLength(t, newManifestHash, encrypt) + + newManifest := downloadManifest(t, client, newManifestHash, encrypt) + + var found bool + for _, e := range newManifest.Entries { + if e.Path == "humans.txt" { + found = true + if e.Size != int64(len(humansData)) { + t.Errorf("expected humans.txt size %v, got %v", len(humansData), e.Size) + } + if e.ModTime.IsZero() { + t.Errorf("got zero mod time for humans.txt") + } + ct := "text/plain; charset=utf-8" + if e.ContentType != ct { + t.Errorf("expected content type %q, got %q", ct, e.ContentType) + } + break + } + } + if !found { + t.Fatal("no humans.txt in new manifest") + } + + checkFile(t, client, newManifestHash, "humans.txt", humansData) + }) + + // upload a new file and use its manifest to add it the original manifest, + // but ensure that the file will be in the nested manifest of the original one. + t.Run("add nested", func(t *testing.T) { + robotsData := []byte(`{"disallow": "/"}`) + robotsDataFilename := filepath.Join(tmp, "robots.json") + err = ioutil.WriteFile(robotsDataFilename, robotsData, 0666) + if err != nil { + t.Fatal(err) + } + + robotsManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "up", + robotsDataFilename, + ) + + newManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "manifest", + "add", + origManifestHash, + "robots.json", + robotsManifestHash, + ) + + checkHashLength(t, newManifestHash, encrypt) + + newManifest := downloadManifest(t, client, newManifestHash, encrypt) + + var found bool + loop: + for _, e := range newManifest.Entries { + if e.Path == "robots." 
{ + nestedManifest := downloadManifest(t, client, e.Hash, encrypt) + for _, e := range nestedManifest.Entries { + if e.Path == "json" { + found = true + if e.Size != int64(len(robotsData)) { + t.Errorf("expected robots.json size %v, got %v", len(robotsData), e.Size) + } + if e.ModTime.IsZero() { + t.Errorf("got zero mod time for robots.json") + } + ct := "application/json" + if e.ContentType != ct { + t.Errorf("expected content type %q, got %q", ct, e.ContentType) + } + break loop + } + } + } + } + if !found { + t.Fatal("no robots.json in new manifest") + } + + checkFile(t, client, newManifestHash, "robots.json", robotsData) + }) + + // upload a new file and use its manifest to change the file it the original manifest. + t.Run("update", func(t *testing.T) { + indexData := []byte("
<h1>Ethereum Swarm</h1>
") + indexDataFilename := filepath.Join(tmp, "index.html") + err = ioutil.WriteFile(indexDataFilename, indexData, 0666) + if err != nil { + t.Fatal(err) + } + + indexManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "up", + indexDataFilename, + ) + + newManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "manifest", + "update", + origManifestHash, + "index.html", + indexManifestHash, + ) + + checkHashLength(t, newManifestHash, encrypt) + + newManifest := downloadManifest(t, client, newManifestHash, encrypt) + + var found bool + for _, e := range newManifest.Entries { + if e.Path == "index.html" { + found = true + if e.Size != int64(len(indexData)) { + t.Errorf("expected index.html size %v, got %v", len(indexData), e.Size) + } + if e.ModTime.IsZero() { + t.Errorf("got zero mod time for index.html") + } + ct := "text/html; charset=utf-8" + if e.ContentType != ct { + t.Errorf("expected content type %q, got %q", ct, e.ContentType) + } + break + } + } + if !found { + t.Fatal("no index.html in new manifest") + } + + checkFile(t, client, newManifestHash, "index.html", indexData) + + // check default entry change + checkFile(t, client, newManifestHash, "", indexData) + }) + + // upload a new file and use its manifest to change the file it the original manifest, + // but ensure that the file is in the nested manifest of the original one. + t.Run("update nested", func(t *testing.T) { + robotsData := []byte(`Only humans allowed!!!`) + robotsDataFilename := filepath.Join(tmp, "robots.html") + err = ioutil.WriteFile(robotsDataFilename, robotsData, 0666) + if err != nil { + t.Fatal(err) + } + + humansManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "up", + robotsDataFilename, + ) + + newManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "manifest", + "update", + origManifestHash, + "robots.html", + humansManifestHash, + ) + + checkHashLength(t, newManifestHash, encrypt) + + newManifest := downloadManifest(t, client, newManifestHash, encrypt) + + var found bool + loop: + for _, e := range newManifest.Entries { + if e.Path == "robots." { + nestedManifest := downloadManifest(t, client, e.Hash, encrypt) + for _, e := range nestedManifest.Entries { + if e.Path == "html" { + found = true + if e.Size != int64(len(robotsData)) { + t.Errorf("expected robots.html size %v, got %v", len(robotsData), e.Size) + } + if e.ModTime.IsZero() { + t.Errorf("got zero mod time for robots.html") + } + ct := "text/html; charset=utf-8" + if e.ContentType != ct { + t.Errorf("expected content type %q, got %q", ct, e.ContentType) + } + break loop + } + } + } + } + if !found { + t.Fatal("no robots.html in new manifest") + } + + checkFile(t, client, newManifestHash, "robots.html", robotsData) + }) + + // remove a file from the manifest. + t.Run("remove", func(t *testing.T) { + newManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "manifest", + "remove", + origManifestHash, + "mutants.txt", + ) + + checkHashLength(t, newManifestHash, encrypt) + + newManifest := downloadManifest(t, client, newManifestHash, encrypt) + + var found bool + for _, e := range newManifest.Entries { + if e.Path == "mutants.txt" { + found = true + break + } + } + if found { + t.Fatal("mutants.txt is not removed") + } + }) + + // remove a file from the manifest, but ensure that the file is in + // the nested manifest of the original one. 
+ t.Run("remove nested", func(t *testing.T) { + newManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "manifest", + "remove", + origManifestHash, + "robots.html", + ) + + checkHashLength(t, newManifestHash, encrypt) + + newManifest := downloadManifest(t, client, newManifestHash, encrypt) + + var found bool + loop: + for _, e := range newManifest.Entries { + if e.Path == "robots." { + nestedManifest := downloadManifest(t, client, e.Hash, encrypt) + for _, e := range nestedManifest.Entries { + if e.Path == "html" { + found = true + break loop + } + } + } + } + if found { + t.Fatal("robots.html in not removed") + } + }) +} + +// TestNestedDefaultEntryUpdate tests if the default entry is updated +// if the file in nested manifest used for it is also updated. +func TestNestedDefaultEntryUpdate(t *testing.T) { + testNestedDefaultEntryUpdate(t, false) +} + +// TestNestedDefaultEntryUpdateEncrypted tests if the default entry +// of encrypted upload is updated if the file in nested manifest +// used for it is also updated. +func TestNestedDefaultEntryUpdateEncrypted(t *testing.T) { + testNestedDefaultEntryUpdate(t, true) +} + +func testNestedDefaultEntryUpdate(t *testing.T, encrypt bool) { + t.Parallel() + cluster := newTestCluster(t, 1) + defer cluster.Shutdown() + + tmp, err := ioutil.TempDir("", "swarm-manifest-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + origDir := filepath.Join(tmp, "orig") + if err := os.Mkdir(origDir, 0777); err != nil { + t.Fatal(err) + } + + indexData := []byte("
<h1>Test</h1>
") + indexDataFilename := filepath.Join(origDir, "index.html") + err = ioutil.WriteFile(indexDataFilename, indexData, 0666) + if err != nil { + t.Fatal(err) + } + // Add another file with common prefix as the default entry to test updates of + // default entry with nested manifests. + err = ioutil.WriteFile(filepath.Join(origDir, "index.txt"), []byte("Test"), 0666) + if err != nil { + t.Fatal(err) + } + + args := []string{ + "--bzzapi", + cluster.Nodes[0].URL, + "--recursive", + "--defaultpath", + indexDataFilename, + "up", + origDir, + } + if encrypt { + args = append(args, "--encrypt") + } + + origManifestHash := runSwarmExpectHash(t, args...) + + checkHashLength(t, origManifestHash, encrypt) + + client := swarm.NewClient(cluster.Nodes[0].URL) + + newIndexData := []byte("
<h1>Ethereum Swarm</h1>
") + newIndexDataFilename := filepath.Join(tmp, "index.html") + err = ioutil.WriteFile(newIndexDataFilename, newIndexData, 0666) + if err != nil { + t.Fatal(err) + } + + newIndexManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "up", + newIndexDataFilename, + ) + + newManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "manifest", + "update", + origManifestHash, + "index.html", + newIndexManifestHash, + ) + + checkHashLength(t, newManifestHash, encrypt) + + newManifest := downloadManifest(t, client, newManifestHash, encrypt) + + var found bool + for _, e := range newManifest.Entries { + if e.Path == "index." { + found = true + newManifest = downloadManifest(t, client, e.Hash, encrypt) + break + } + } + if !found { + t.Fatal("no index. path in new manifest") + } + + found = false + for _, e := range newManifest.Entries { + if e.Path == "html" { + found = true + if e.Size != int64(len(newIndexData)) { + t.Errorf("expected index.html size %v, got %v", len(newIndexData), e.Size) + } + if e.ModTime.IsZero() { + t.Errorf("got zero mod time for index.html") + } + ct := "text/html; charset=utf-8" + if e.ContentType != ct { + t.Errorf("expected content type %q, got %q", ct, e.ContentType) + } + break + } + } + if !found { + t.Fatal("no html in new manifest") + } + + checkFile(t, client, newManifestHash, "index.html", newIndexData) + + // check default entry change + checkFile(t, client, newManifestHash, "", newIndexData) +} + +func runSwarmExpectHash(t *testing.T, args ...string) (hash string) { + t.Helper() + hashRegexp := `[a-f\d]{64,128}` + up := runSwarm(t, args...) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + + if len(matches) < 1 { + t.Fatal("no matches found") + } + return matches[0] +} + +func checkHashLength(t *testing.T, hash string, encrypted bool) { + t.Helper() + l := len(hash) + if encrypted && l != 128 { + t.Errorf("expected hash length 128, got %v", l) + } + if !encrypted && l != 64 { + t.Errorf("expected hash length 64, got %v", l) + } +} + +func downloadManifest(t *testing.T, client *swarm.Client, hash string, encrypted bool) (manifest *api.Manifest) { + t.Helper() + m, isEncrypted, err := client.DownloadManifest(hash) + if err != nil { + t.Fatal(err) + } + + if encrypted != isEncrypted { + t.Error("new manifest encryption flag is not correct") + } + return m +} + +func checkFile(t *testing.T, client *swarm.Client, hash, path string, expected []byte) { + t.Helper() + f, err := client.Download(hash, path) + if err != nil { + t.Fatal(err) + } + + got, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(got, expected) { + t.Errorf("expected file content %q, got %q", expected, got) + } +} diff --git a/cmd/swarm/mru.go b/cmd/swarm/mru.go new file mode 100644 index 000000000000..6176b6d6c806 --- /dev/null +++ b/cmd/swarm/mru.go @@ -0,0 +1,169 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +// Command resource allows the user to create and update signed mutable resource updates +package main + +import ( + "fmt" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/ethereum/go-ethereum/cmd/utils" + swarm "github.com/ethereum/go-ethereum/swarm/api/client" + "github.com/ethereum/go-ethereum/swarm/storage/mru" + "gopkg.in/urfave/cli.v1" +) + +func NewGenericSigner(ctx *cli.Context) mru.Signer { + return mru.NewGenericSigner(getPrivKey(ctx)) +} + +// swarm resource create [--name ] [--data <0x Hexdata> [--multihash=false]] +// swarm resource update <0x Hexdata> [--multihash=false] +// swarm resource info + +func resourceCreate(ctx *cli.Context) { + args := ctx.Args() + + var ( + bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + client = swarm.NewClient(bzzapi) + multihash = ctx.Bool(SwarmResourceMultihashFlag.Name) + initialData = ctx.String(SwarmResourceDataOnCreateFlag.Name) + name = ctx.String(SwarmResourceNameFlag.Name) + ) + + if len(args) < 1 { + fmt.Println("Incorrect number of arguments") + cli.ShowCommandHelpAndExit(ctx, "create", 1) + return + } + signer := NewGenericSigner(ctx) + frequency, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + fmt.Printf("Frequency formatting error: %s\n", err.Error()) + cli.ShowCommandHelpAndExit(ctx, "create", 1) + return + } + + metadata := mru.ResourceMetadata{ + Name: name, + Frequency: frequency, + Owner: signer.Address(), + } + + var newResourceRequest *mru.Request + if initialData != "" { + initialDataBytes, err := hexutil.Decode(initialData) + if err != nil { + fmt.Printf("Error parsing data: %s\n", err.Error()) + cli.ShowCommandHelpAndExit(ctx, "create", 1) + return + } + newResourceRequest, err = mru.NewCreateUpdateRequest(&metadata) + if err != nil { + utils.Fatalf("Error creating new resource request: %s", err) + } + newResourceRequest.SetData(initialDataBytes, multihash) + if err = newResourceRequest.Sign(signer); err != nil { + utils.Fatalf("Error signing resource update: %s", err.Error()) + } + } else { + newResourceRequest, err = mru.NewCreateRequest(&metadata) + if err != nil { + utils.Fatalf("Error creating new resource request: %s", err) + } + } + + manifestAddress, err := client.CreateResource(newResourceRequest) + if err != nil { + utils.Fatalf("Error creating resource: %s", err.Error()) + return + } + fmt.Println(manifestAddress) // output manifest address to the user in a single line (useful for other commands to pick up) + +} + +func resourceUpdate(ctx *cli.Context) { + args := ctx.Args() + + var ( + bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + client = swarm.NewClient(bzzapi) + multihash = ctx.Bool(SwarmResourceMultihashFlag.Name) + ) + + if len(args) < 2 { + fmt.Println("Incorrect number of arguments") + cli.ShowCommandHelpAndExit(ctx, "update", 1) + return + } + signer := NewGenericSigner(ctx) + manifestAddressOrDomain := args[0] + data, err := hexutil.Decode(args[1]) + if err != nil { + utils.Fatalf("Error parsing data: %s", err.Error()) + return + } + + // Retrieve resource status and metadata out of the manifest + updateRequest, err := client.GetResourceMetadata(manifestAddressOrDomain) + if err != nil { + utils.Fatalf("Error retrieving resource status: %s", err.Error()) + } + + // set the new data + updateRequest.SetData(data, multihash) + + // sign update + if err 
= updateRequest.Sign(signer); err != nil { + utils.Fatalf("Error signing resource update: %s", err.Error()) + } + + // post update + err = client.UpdateResource(updateRequest) + if err != nil { + utils.Fatalf("Error updating resource: %s", err.Error()) + return + } +} + +func resourceInfo(ctx *cli.Context) { + var ( + bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + client = swarm.NewClient(bzzapi) + ) + args := ctx.Args() + if len(args) < 1 { + fmt.Println("Incorrect number of arguments.") + cli.ShowCommandHelpAndExit(ctx, "info", 1) + return + } + manifestAddressOrDomain := args[0] + metadata, err := client.GetResourceMetadata(manifestAddressOrDomain) + if err != nil { + utils.Fatalf("Error retrieving resource metadata: %s", err.Error()) + return + } + encodedMetadata, err := metadata.MarshalJSON() + if err != nil { + utils.Fatalf("Error encoding metadata to JSON for display:%s", err) + } + fmt.Println(string(encodedMetadata)) +} diff --git a/cmd/swarm/run_test.go b/cmd/swarm/run_test.go index a70c4686dd51..3e766dc10d58 100644 --- a/cmd/swarm/run_test.go +++ b/cmd/swarm/run_test.go @@ -17,12 +17,17 @@ package main import ( + "context" + "crypto/ecdsa" "fmt" "io/ioutil" "net" "os" + "path" "path/filepath" "runtime" + "sync" + "syscall" "testing" "time" @@ -172,14 +177,15 @@ func (c *testCluster) Cleanup() { } type testNode struct { - Name string - Addr string - URL string - Enode string - Dir string - IpcPath string - Client *rpc.Client - Cmd *cmdtest.TestCmd + Name string + Addr string + URL string + Enode string + Dir string + IpcPath string + PrivateKey *ecdsa.PrivateKey + Client *rpc.Client + Cmd *cmdtest.TestCmd } const testPassphrase = "swarm-test-passphrase" @@ -218,14 +224,12 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { } // assign ports - httpPort, err := assignTCPPort() - if err != nil { - t.Fatal(err) - } - p2pPort, err := assignTCPPort() + ports, err := getAvailableTCPPorts(2) if err != nil { t.Fatal(err) } + p2pPort := ports[0] + httpPort := ports[1] // start the node node.Cmd = runSwarm(t, @@ -246,6 +250,17 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { } }() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // ensure that all ports have active listeners + // so that the next node will not get the same + // when calling getAvailableTCPPorts + err = waitTCPPorts(ctx, ports...) 
+ if err != nil { + t.Fatal(err) + } + // wait for the node to start for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { node.Client, err = rpc.Dial(conf.IPCEndpoint()) @@ -277,17 +292,19 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { func newTestNode(t *testing.T, dir string) *testNode { conf, account := getTestAccount(t, dir) - node := &testNode{Dir: dir} + ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1) + + pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase}) + + node := &testNode{Dir: dir, PrivateKey: pk} // assign ports - httpPort, err := assignTCPPort() - if err != nil { - t.Fatal(err) - } - p2pPort, err := assignTCPPort() + ports, err := getAvailableTCPPorts(2) if err != nil { t.Fatal(err) } + p2pPort := ports[0] + httpPort := ports[1] // start the node node.Cmd = runSwarm(t, @@ -308,6 +325,17 @@ func newTestNode(t *testing.T, dir string) *testNode { } }() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // ensure that all ports have active listeners + // so that the next node will not get the same + // when calling getAvailableTCPPorts + err = waitTCPPorts(ctx, ports...) + if err != nil { + t.Fatal(err) + } + // wait for the node to start for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { node.Client, err = rpc.Dial(conf.IPCEndpoint()) @@ -343,15 +371,92 @@ func (n *testNode) Shutdown() { } } -func assignTCPPort() (string, error) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return "", err +// getAvailableTCPPorts returns a set of ports that +// nothing is listening on at the time. +// +// Function assignTCPPort cannot be called in sequence +// and guardantee that the same port will be returned in +// different calls as the listener is closed within the function, +// not after all listeners are started and selected unique +// available ports. +func getAvailableTCPPorts(count int) (ports []string, err error) { + for i := 0; i < count; i++ { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + // defer close in the loop to be sure the same port will not + // be selected in the next iteration + defer l.Close() + + _, port, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + return nil, err + } + ports = append(ports, port) } - l.Close() - _, port, err := net.SplitHostPort(l.Addr().String()) - if err != nil { - return "", err + return ports, nil +} + +// waitTCPPorts blocks until tcp connections can be +// established on all provided ports. It runs all +// ports dialers in parallel, and returns the first +// encountered error. +// See waitTCPPort also. +func waitTCPPorts(ctx context.Context, ports ...string) error { + var err error + // mu locks err variable that is assigned in + // other goroutines + var mu sync.Mutex + + // cancel is canceling all goroutines + // when the firs error is returned + // to prevent unnecessary waiting + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var wg sync.WaitGroup + for _, port := range ports { + wg.Add(1) + go func(port string) { + defer wg.Done() + + e := waitTCPPort(ctx, port) + + mu.Lock() + defer mu.Unlock() + if e != nil && err == nil { + err = e + cancel() + } + }(port) + } + wg.Wait() + + return err +} + +// waitTCPPort blocks until tcp connection can be established +// ona provided port. 
It has a 3 minute timeout as maximum, +// to prevent long waiting, but it can be shortened with +// a provided context instance. Dialer has a 10 second timeout +// in every iteration, and connection refused error will be +// retried in 100 milliseconds periods. +func waitTCPPort(ctx context.Context, port string) error { + ctx, cancel := context.WithTimeout(ctx, 3*time.Minute) + defer cancel() + + for { + c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port) + if err != nil { + if operr, ok := err.(*net.OpError); ok { + if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED { + time.Sleep(100 * time.Millisecond) + continue + } + } + return err + } + return c.Close() } - return port, nil } diff --git a/cmd/swarm/swarm-smoke/upload_and_sync.go b/cmd/swarm/swarm-smoke/upload_and_sync.go index 7f9051e7fecb..d5300b63d9a5 100644 --- a/cmd/swarm/swarm-smoke/upload_and_sync.go +++ b/cmd/swarm/swarm-smoke/upload_and_sync.go @@ -37,8 +37,14 @@ import ( ) func generateEndpoints(scheme string, cluster string, from int, to int) { + if cluster == "prod" { + cluster = "" + } else { + cluster = cluster + "." + } + for port := from; port <= to; port++ { - endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster)) + endpoints = append(endpoints, fmt.Sprintf("%s://%v.%sswarm-gateways.net", scheme, port, cluster)) } if includeLocalhost { @@ -71,8 +77,9 @@ func cliUploadAndSync(c *cli.Context) error { log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash)) if filesize < 10 { - time.Sleep(15 * time.Second) + time.Sleep(35 * time.Second) } else { + time.Sleep(15 * time.Second) time.Sleep(2 * time.Duration(filesize) * time.Second) } @@ -102,7 +109,7 @@ func cliUploadAndSync(c *cli.Context) error { // fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file func fetch(hash string, endpoint string, original []byte, ruid string) error { log.Trace("sleeping", "ruid", ruid) - time.Sleep(1 * time.Second) + time.Sleep(5 * time.Second) log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash) res, err := http.Get(endpoint + "/bzz:/" + hash + "/") diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go index 8ba0e7c5f0c0..9eae2a3f806b 100644 --- a/cmd/swarm/upload.go +++ b/cmd/swarm/upload.go @@ -98,6 +98,17 @@ func upload(ctx *cli.Context) { if !recursive { return "", errors.New("Argument is a directory and recursive upload is disabled") } + if defaultPath != "" { + // construct absolute default path + absDefaultPath, _ := filepath.Abs(defaultPath) + absFile, _ := filepath.Abs(file) + // make sure absolute directory ends with only one "/" + // to trim it from absolute default path and get relative default path + absFile = strings.TrimRight(absFile, "/") + "/" + if absDefaultPath != "" && absFile != "" && strings.HasPrefix(absDefaultPath, absFile) { + defaultPath = strings.TrimPrefix(absDefaultPath, absFile) + } + } return client.UploadDirectory(file, defaultPath, "", toEncrypt) } } else { diff --git a/cmd/swarm/upload_test.go b/cmd/swarm/upload_test.go index 2afc9b3a1186..c3199dadc6ba 100644 --- a/cmd/swarm/upload_test.go +++ b/cmd/swarm/upload_test.go @@ -273,3 +273,84 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) { } } } + +// TestCLISwarmUpDefaultPath tests swarm recursive upload with relative and absolute +// default paths and with encryption. 
+func TestCLISwarmUpDefaultPath(t *testing.T) { + testCLISwarmUpDefaultPath(false, false, t) + testCLISwarmUpDefaultPath(false, true, t) + testCLISwarmUpDefaultPath(true, false, t) + testCLISwarmUpDefaultPath(true, true, t) +} + +func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) { + cluster := newTestCluster(t, 1) + defer cluster.Shutdown() + + tmp, err := ioutil.TempDir("", "swarm-defaultpath-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + err = ioutil.WriteFile(filepath.Join(tmp, "index.html"), []byte("

<h1>Test</h1>

"), 0666) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(filepath.Join(tmp, "robots.txt"), []byte("Disallow: /"), 0666) + if err != nil { + t.Fatal(err) + } + + defaultPath := "index.html" + if absDefaultPath { + defaultPath = filepath.Join(tmp, defaultPath) + } + + args := []string{ + "--bzzapi", + cluster.Nodes[0].URL, + "--recursive", + "--defaultpath", + defaultPath, + "up", + tmp, + } + if toEncrypt { + args = append(args, "--encrypt") + } + + up := runSwarm(t, args...) + hashRegexp := `[a-f\d]{64,128}` + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + hash := matches[0] + + client := swarm.NewClient(cluster.Nodes[0].URL) + + m, isEncrypted, err := client.DownloadManifest(hash) + if err != nil { + t.Fatal(err) + } + + if toEncrypt != isEncrypted { + t.Error("downloaded manifest is not encrypted") + } + + var found bool + var entriesCount int + for _, e := range m.Entries { + entriesCount++ + if e.Path == "" { + found = true + } + } + + if !found { + t.Error("manifest default entry was not found") + } + + if entriesCount != 3 { + t.Errorf("manifest contains %v entries, expected %v", entriesCount, 3) + } +} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index a191e4430660..c9a936dd5f16 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -24,7 +24,6 @@ import ( "math/big" "os" "path/filepath" - "runtime" "strconv" "strings" "time" @@ -98,7 +97,7 @@ func NewApp(gitCommit, usage string) *cli.App { app.Author = "" //app.Authors = nil app.Email = "" - app.Version = params.Version + app.Version = params.VersionWithMeta if len(gitCommit) >= 8 { app.Version += "-" + gitCommit[:8] } @@ -158,14 +157,6 @@ var ( Usage: "Document Root for HTTPClient file scheme", Value: DirectoryString{homeDir()}, } - FastSyncFlag = cli.BoolFlag{ - Name: "fast", - Usage: "Enable fast syncing through state downloads (replaced by --syncmode)", - } - LightModeFlag = cli.BoolFlag{ - Name: "light", - Usage: "Enable light client mode (replaced by --syncmode)", - } defaultSyncMode = eth.DefaultConfig.SyncMode SyncModeFlag = TextMarshalerFlag{ Name: "syncmode", @@ -193,7 +184,7 @@ var ( } // Dashboard settings DashboardEnabledFlag = cli.BoolFlag{ - Name: "dashboard", + Name: metrics.DashboardEnabledFlag, Usage: "Enable the dashboard", } DashboardAddrFlag = cli.StringFlag{ @@ -242,6 +233,10 @@ var ( Value: eth.DefaultConfig.Ethash.DatasetsOnDisk, } // Transaction pool settings + TxPoolLocalsFlag = cli.StringFlag{ + Name: "txpool.locals", + Usage: "Comma separated accounts to treat as locals (no flush, priority inclusion)", + } TxPoolNoLocalsFlag = cli.BoolFlag{ Name: "txpool.nolocals", Usage: "Disables price exemptions for locally submitted transactions", @@ -318,28 +313,61 @@ var ( Usage: "Enable mining", } MinerThreadsFlag = cli.IntFlag{ - Name: "minerthreads", + Name: "miner.threads", Usage: "Number of CPU threads to use for mining", - Value: runtime.NumCPU(), + Value: 0, + } + MinerLegacyThreadsFlag = cli.IntFlag{ + Name: "minerthreads", + Usage: "Number of CPU threads to use for mining (deprecated, use --miner.threads)", + Value: 0, + } + MinerNotifyFlag = cli.StringFlag{ + Name: "miner.notify", + Usage: "Comma separated HTTP URL list to notify of new work packages", } - TargetGasLimitFlag = cli.Uint64Flag{ + MinerGasTargetFlag = cli.Uint64Flag{ + Name: "miner.gastarget", + Usage: "Target gas floor for mined blocks", + Value: params.GenesisGasLimit, + } + MinerLegacyGasTargetFlag = cli.Uint64Flag{ Name: "targetgaslimit", - Usage: "Target gas limit sets the artificial target gas 
floor for the blocks to mine", + Usage: "Target gas floor for mined blocks (deprecated, use --miner.gastarget)", Value: params.GenesisGasLimit, } - EtherbaseFlag = cli.StringFlag{ + MinerGasPriceFlag = BigFlag{ + Name: "miner.gasprice", + Usage: "Minimal gas price for mining a transactions", + Value: eth.DefaultConfig.MinerGasPrice, + } + MinerLegacyGasPriceFlag = BigFlag{ + Name: "gasprice", + Usage: "Minimal gas price for mining a transactions (deprecated, use --miner.gasprice)", + Value: eth.DefaultConfig.MinerGasPrice, + } + MinerEtherbaseFlag = cli.StringFlag{ + Name: "miner.etherbase", + Usage: "Public address for block mining rewards (default = first account)", + Value: "0", + } + MinerLegacyEtherbaseFlag = cli.StringFlag{ Name: "etherbase", - Usage: "Public address for block mining rewards (default = first account created)", + Usage: "Public address for block mining rewards (default = first account, deprecated, use --miner.etherbase)", Value: "0", } - GasPriceFlag = BigFlag{ - Name: "gasprice", - Usage: "Minimal gas price to accept for mining a transactions", - Value: eth.DefaultConfig.GasPrice, + MinerExtraDataFlag = cli.StringFlag{ + Name: "miner.extradata", + Usage: "Block extra data set by the miner (default = client version)", } - ExtraDataFlag = cli.StringFlag{ + MinerLegacyExtraDataFlag = cli.StringFlag{ Name: "extradata", - Usage: "Block extra data set by the miner (default = client version)", + Usage: "Block extra data set by the miner (default = client version, deprecated, use --miner.extradata)", + } + MinerRecommitIntervalFlag = cli.DurationFlag{ + Name: "miner.recommit", + Usage: "Time interval to recreate the block being mined.", + Value: eth.DefaultConfig.MinerRecommit, } // Account settings UnlockedAccountFlag = cli.StringFlag{ @@ -644,8 +672,7 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { for _, url := range urls { node, err := discover.ParseNode(url) if err != nil { - log.Error("Bootstrap URL invalid", "enode", url, "err", err) - continue + log.Crit("Bootstrap URL invalid", "enode", url, "err", err) } cfg.BootstrapNodes = append(cfg.BootstrapNodes, node) } @@ -811,10 +838,19 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error // setEtherbase retrieves the etherbase either from the directly specified // command line flags or from the keystore if CLI indexed. 
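// Editor's note: the setEtherbase hunk below (and the other miner flags in this patch)
// follows one migration convention: read the deprecated flag first, then let the new
// namespaced flag override it when both are set. A generic sketch with hypothetical
// flag names:
//
//	var value string
//	if ctx.GlobalIsSet(LegacyFlag.Name) {
//		value = ctx.GlobalString(LegacyFlag.Name)
//	}
//	if ctx.GlobalIsSet(NewFlag.Name) { // the new flag wins if both are given
//		value = ctx.GlobalString(NewFlag.Name)
//	}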
func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) { - if ctx.GlobalIsSet(EtherbaseFlag.Name) { - account, err := MakeAddress(ks, ctx.GlobalString(EtherbaseFlag.Name)) + // Extract the current etherbase, new flag overriding legacy one + var etherbase string + if ctx.GlobalIsSet(MinerLegacyEtherbaseFlag.Name) { + etherbase = ctx.GlobalString(MinerLegacyEtherbaseFlag.Name) + } + if ctx.GlobalIsSet(MinerEtherbaseFlag.Name) { + etherbase = ctx.GlobalString(MinerEtherbaseFlag.Name) + } + // Convert the etherbase into an address and configure it + if etherbase != "" { + account, err := MakeAddress(ks, etherbase) if err != nil { - Fatalf("Option %q: %v", EtherbaseFlag.Name, err) + Fatalf("Invalid miner etherbase: %v", err) } cfg.Etherbase = account.Address } @@ -845,7 +881,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { setBootstrapNodes(ctx, cfg) setBootstrapNodesV5(ctx, cfg) - lightClient := ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalString(SyncModeFlag.Name) == "light" + lightClient := ctx.GlobalString(SyncModeFlag.Name) == "light" lightServer := ctx.GlobalInt(LightServFlag.Name) != 0 lightPeers := ctx.GlobalInt(LightPeersFlag.Name) @@ -945,6 +981,16 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) { } func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { + if ctx.GlobalIsSet(TxPoolLocalsFlag.Name) { + locals := strings.Split(ctx.GlobalString(TxPoolLocalsFlag.Name), ",") + for _, account := range locals { + if trimmed := strings.TrimSpace(account); !common.IsHexAddress(trimmed) { + Fatalf("Invalid account in --txpool.locals: %s", trimmed) + } else { + cfg.Locals = append(cfg.Locals, common.HexToAddress(account)) + } + } + } if ctx.GlobalIsSet(TxPoolNoLocalsFlag.Name) { cfg.NoLocals = ctx.GlobalBool(TxPoolNoLocalsFlag.Name) } @@ -998,7 +1044,7 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) { } } -// checkExclusive verifies that only a single isntance of the provided flags was +// checkExclusive verifies that only a single instance of the provided flags was // set by the user. Each flag might optionally be followed by a string type to // specialize it further. 
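// Editor's note: an example of the "optionally followed by a string" form described
// above, taken from how this patch itself calls checkExclusive in SetEthConfig:
//
//	// --lightserv and --syncmode only conflict when --syncmode is set to "light"
//	checkExclusive(ctx, LightServFlag, SyncModeFlag, "light")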
func checkExclusive(ctx *cli.Context, args ...interface{}) { @@ -1050,8 +1096,6 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) { func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { // Avoid conflicting network flags checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag) - checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag) - checkExclusive(ctx, LightServFlag, LightModeFlag) checkExclusive(ctx, LightServFlag, SyncModeFlag, "light") ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) @@ -1060,13 +1104,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { setTxPool(ctx, &cfg.TxPool) setEthash(ctx, cfg) - switch { - case ctx.GlobalIsSet(SyncModeFlag.Name): + if ctx.GlobalIsSet(SyncModeFlag.Name) { cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) - case ctx.GlobalBool(FastSyncFlag.Name): - cfg.SyncMode = downloader.FastSync - case ctx.GlobalBool(LightModeFlag.Name): - cfg.SyncMode = downloader.LightSync } if ctx.GlobalIsSet(LightServFlag.Name) { cfg.LightServ = ctx.GlobalInt(LightServFlag.Name) @@ -1091,17 +1130,32 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) { cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 } + if ctx.GlobalIsSet(MinerLegacyThreadsFlag.Name) { + cfg.MinerThreads = ctx.GlobalInt(MinerLegacyThreadsFlag.Name) + } if ctx.GlobalIsSet(MinerThreadsFlag.Name) { cfg.MinerThreads = ctx.GlobalInt(MinerThreadsFlag.Name) } + if ctx.GlobalIsSet(MinerNotifyFlag.Name) { + cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",") + } if ctx.GlobalIsSet(DocRootFlag.Name) { cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name) } - if ctx.GlobalIsSet(ExtraDataFlag.Name) { - cfg.ExtraData = []byte(ctx.GlobalString(ExtraDataFlag.Name)) + if ctx.GlobalIsSet(MinerLegacyExtraDataFlag.Name) { + cfg.MinerExtraData = []byte(ctx.GlobalString(MinerLegacyExtraDataFlag.Name)) + } + if ctx.GlobalIsSet(MinerExtraDataFlag.Name) { + cfg.MinerExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name)) } - if ctx.GlobalIsSet(GasPriceFlag.Name) { - cfg.GasPrice = GlobalBig(ctx, GasPriceFlag.Name) + if ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) { + cfg.MinerGasPrice = GlobalBig(ctx, MinerLegacyGasPriceFlag.Name) + } + if ctx.GlobalIsSet(MinerGasPriceFlag.Name) { + cfg.MinerGasPrice = GlobalBig(ctx, MinerGasPriceFlag.Name) + } + if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) { + cfg.MinerRecommit = ctx.Duration(MinerRecommitIntervalFlag.Name) } if ctx.GlobalIsSet(VMEnableDebugFlag.Name) { // TODO(fjl): force-enable this in --dev mode @@ -1143,8 +1197,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { log.Info("Using developer account", "address", developer.Address) cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address) - if !ctx.GlobalIsSet(GasPriceFlag.Name) { - cfg.GasPrice = big.NewInt(1) + if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) { + cfg.MinerGasPrice = big.NewInt(1) } } // TODO(fjl): move trie cache generations into config @@ -1185,7 +1239,7 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) { // RegisterDashboardService adds a dashboard to the stack. 
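// Editor's note: services are attached through constructor callbacks; the change below
// only adds a log directory, resolved inside the node's data directory, to the existing
// registration shape:
//
//	stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
//		return dashboard.New(cfg, commit, ctx.ResolvePath("logs")), nil
//	})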
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config, commit string) { stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - return dashboard.New(cfg, commit) + return dashboard.New(cfg, commit, ctx.ResolvePath("logs")), nil }) } @@ -1199,7 +1253,7 @@ func RegisterShhService(stack *node.Node, cfg *whisper.Config) { } // RegisterEthStatsService configures the Ethereum Stats daemon and adds it to -// th egiven node. +// the given node. func RegisterEthStatsService(stack *node.Node, url string) { if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { // Retrieve both eth and les services @@ -1218,7 +1272,10 @@ func RegisterEthStatsService(stack *node.Node, url string) { // SetupNetwork configures the system for either the main net or some test network. func SetupNetwork(ctx *cli.Context) { // TODO(fjl): move target gas limit into config - params.TargetGasLimit = ctx.GlobalUint64(TargetGasLimitFlag.Name) + params.TargetGasLimit = ctx.GlobalUint64(MinerLegacyGasTargetFlag.Name) + if ctx.GlobalIsSet(MinerGasTargetFlag.Name) { + params.TargetGasLimit = ctx.GlobalUint64(MinerGasTargetFlag.Name) + } } func SetupMetrics(ctx *cli.Context) { @@ -1249,7 +1306,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { handles = makeDatabaseHandles() ) name := "chaindata" - if ctx.GlobalBool(LightModeFlag.Name) { + if ctx.GlobalString(SyncModeFlag.Name) == "light" { name = "lightchaindata" } chainDb, err := stack.OpenDatabase(name, cache, handles) @@ -1294,7 +1351,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir), DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem, DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk, - }) + }, nil) } } if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { diff --git a/common/mclock/mclock.go b/common/mclock/mclock.go index 02608d17b0be..dcac59c6ceaa 100644 --- a/common/mclock/mclock.go +++ b/common/mclock/mclock.go @@ -30,3 +30,34 @@ type AbsTime time.Duration func Now() AbsTime { return AbsTime(monotime.Now()) } + +// Add returns t + d. +func (t AbsTime) Add(d time.Duration) AbsTime { + return t + AbsTime(d) +} + +// Clock interface makes it possible to replace the monotonic system clock with +// a simulated clock. +type Clock interface { + Now() AbsTime + Sleep(time.Duration) + After(time.Duration) <-chan time.Time +} + +// System implements Clock using the system clock. +type System struct{} + +// Now implements Clock. +func (System) Now() AbsTime { + return AbsTime(monotime.Now()) +} + +// Sleep implements Clock. +func (System) Sleep(d time.Duration) { + time.Sleep(d) +} + +// After implements Clock. +func (System) After(d time.Duration) <-chan time.Time { + return time.After(d) +} diff --git a/common/mclock/simclock.go b/common/mclock/simclock.go new file mode 100644 index 000000000000..e014f56150ea --- /dev/null +++ b/common/mclock/simclock.go @@ -0,0 +1,129 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package mclock + +import ( + "sync" + "time" +) + +// Simulated implements a virtual Clock for reproducible time-sensitive tests. It +// simulates a scheduler on a virtual timescale where actual processing takes zero time. +// +// The virtual clock doesn't advance on its own, call Run to advance it and execute timers. +// Since there is no way to influence the Go scheduler, testing timeout behaviour involving +// goroutines needs special care. A good way to test such timeouts is as follows: First +// perform the action that is supposed to time out. Ensure that the timer you want to test +// is created. Then run the clock until after the timeout. Finally observe the effect of +// the timeout using a channel or semaphore. +type Simulated struct { + now AbsTime + scheduled []event + mu sync.RWMutex + cond *sync.Cond +} + +type event struct { + do func() + at AbsTime +} + +// Run moves the clock by the given duration, executing all timers before that duration. +func (s *Simulated) Run(d time.Duration) { + s.mu.Lock() + defer s.mu.Unlock() + s.init() + + end := s.now + AbsTime(d) + for len(s.scheduled) > 0 { + ev := s.scheduled[0] + if ev.at > end { + break + } + s.now = ev.at + ev.do() + s.scheduled = s.scheduled[1:] + } + s.now = end +} + +func (s *Simulated) ActiveTimers() int { + s.mu.RLock() + defer s.mu.RUnlock() + + return len(s.scheduled) +} + +func (s *Simulated) WaitForTimers(n int) { + s.mu.Lock() + defer s.mu.Unlock() + s.init() + + for len(s.scheduled) < n { + s.cond.Wait() + } +} + +// Now implements Clock. +func (s *Simulated) Now() AbsTime { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.now +} + +// Sleep implements Clock. +func (s *Simulated) Sleep(d time.Duration) { + <-s.After(d) +} + +// After implements Clock. +func (s *Simulated) After(d time.Duration) <-chan time.Time { + after := make(chan time.Time, 1) + s.insert(d, func() { + after <- (time.Time{}).Add(time.Duration(s.now)) + }) + return after +} + +func (s *Simulated) insert(d time.Duration, do func()) { + s.mu.Lock() + defer s.mu.Unlock() + s.init() + + at := s.now + AbsTime(d) + l, h := 0, len(s.scheduled) + ll := h + for l != h { + m := (l + h) / 2 + if at < s.scheduled[m].at { + h = m + } else { + l = m + 1 + } + } + s.scheduled = append(s.scheduled, event{}) + copy(s.scheduled[l+1:], s.scheduled[l:ll]) + s.scheduled[l] = event{do: do, at: at} + s.cond.Broadcast() +} + +func (s *Simulated) init() { + if s.cond == nil { + s.cond = sync.NewCond(&s.mu) + } +} diff --git a/common/prque/prque.go b/common/prque/prque.go new file mode 100755 index 000000000000..9fd31a2e5d1f --- /dev/null +++ b/common/prque/prque.go @@ -0,0 +1,57 @@ +// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque". + +package prque + +import ( + "container/heap" +) + +// Priority queue data structure. +type Prque struct { + cont *sstack +} + +// Creates a new priority queue. +func New(setIndex setIndexCallback) *Prque { + return &Prque{newSstack(setIndex)} +} + +// Pushes a value with a given priority into the queue, expanding if necessary. 
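// Editor's note: a minimal usage sketch, not part of the patch. Prque is a max-priority
// queue (Pop returns the highest priority first), and the set-index callback is only
// needed when elements other than the top one must be removed, so nil suffices here:
//
//	q := prque.New(nil)
//	q.Push("low", 1)
//	q.Push("high", 10)
//	v, prio := q.Pop() // v == "high", prio == 10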
+func (p *Prque) Push(data interface{}, priority int64) { + heap.Push(p.cont, &item{data, priority}) +} + +// Pops the value with the greates priority off the stack and returns it. +// Currently no shrinking is done. +func (p *Prque) Pop() (interface{}, int64) { + item := heap.Pop(p.cont).(*item) + return item.value, item.priority +} + +// Pops only the item from the queue, dropping the associated priority value. +func (p *Prque) PopItem() interface{} { + return heap.Pop(p.cont).(*item).value +} + +// Remove removes the element with the given index. +func (p *Prque) Remove(i int) interface{} { + if i < 0 { + return nil + } + return heap.Remove(p.cont, i) +} + +// Checks whether the priority queue is empty. +func (p *Prque) Empty() bool { + return p.cont.Len() == 0 +} + +// Returns the number of element in the priority queue. +func (p *Prque) Size() int { + return p.cont.Len() +} + +// Clears the contents of the priority queue. +func (p *Prque) Reset() { + *p = *New(p.cont.setIndex) +} diff --git a/common/prque/sstack.go b/common/prque/sstack.go new file mode 100755 index 000000000000..4875dae99d96 --- /dev/null +++ b/common/prque/sstack.go @@ -0,0 +1,106 @@ +// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque". + +package prque + +// The size of a block of data +const blockSize = 4096 + +// A prioritized item in the sorted stack. +// +// Note: priorities can "wrap around" the int64 range, a comes before b if (a.priority - b.priority) > 0. +// The difference between the lowest and highest priorities in the queue at any point should be less than 2^63. +type item struct { + value interface{} + priority int64 +} + +// setIndexCallback is called when the element is moved to a new index. +// Providing setIndexCallback is optional, it is needed only if the application needs +// to delete elements other than the top one. +type setIndexCallback func(a interface{}, i int) + +// Internal sortable stack data structure. Implements the Push and Pop ops for +// the stack (heap) functionality and the Len, Less and Swap methods for the +// sortability requirements of the heaps. +type sstack struct { + setIndex setIndexCallback + size int + capacity int + offset int + + blocks [][]*item + active []*item +} + +// Creates a new, empty stack. +func newSstack(setIndex setIndexCallback) *sstack { + result := new(sstack) + result.setIndex = setIndex + result.active = make([]*item, blockSize) + result.blocks = [][]*item{result.active} + result.capacity = blockSize + return result +} + +// Pushes a value onto the stack, expanding it if necessary. Required by +// heap.Interface. +func (s *sstack) Push(data interface{}) { + if s.size == s.capacity { + s.active = make([]*item, blockSize) + s.blocks = append(s.blocks, s.active) + s.capacity += blockSize + s.offset = 0 + } else if s.offset == blockSize { + s.active = s.blocks[s.size/blockSize] + s.offset = 0 + } + if s.setIndex != nil { + s.setIndex(data.(*item).value, s.size) + } + s.active[s.offset] = data.(*item) + s.offset++ + s.size++ +} + +// Pops a value off the stack and returns it. Currently no shrinking is done. +// Required by heap.Interface. +func (s *sstack) Pop() (res interface{}) { + s.size-- + s.offset-- + if s.offset < 0 { + s.offset = blockSize - 1 + s.active = s.blocks[s.size/blockSize] + } + res, s.active[s.offset] = s.active[s.offset], nil + if s.setIndex != nil { + s.setIndex(res.(*item).value, -1) + } + return +} + +// Returns the length of the stack. Required by sort.Interface. 
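// Editor's note: the Less method a few lines below compares priorities by signed
// difference rather than with "<", which is what makes the documented int64 wrap-around
// behaviour work. A tiny illustration (Go signed overflow wraps deterministically):
//
//	a := int64(math.MinInt64 + 5) // a priority that wrapped past MaxInt64
//	b := int64(math.MaxInt64 - 5)
//	fmt.Println(a - b) // 11: the difference is positive, so a is ordered first
//	fmt.Println(a > b) // false: a plain comparison would order them the wrong way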
+func (s *sstack) Len() int { + return s.size +} + +// Compares the priority of two elements of the stack (higher is first). +// Required by sort.Interface. +func (s *sstack) Less(i, j int) bool { + return (s.blocks[i/blockSize][i%blockSize].priority - s.blocks[j/blockSize][j%blockSize].priority) > 0 +} + +// Swaps two elements in the stack. Required by sort.Interface. +func (s *sstack) Swap(i, j int) { + ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize + a, b := s.blocks[jb][jo], s.blocks[ib][io] + if s.setIndex != nil { + s.setIndex(a.value, i) + s.setIndex(b.value, j) + } + s.blocks[ib][io], s.blocks[jb][jo] = a, b +} + +// Resets the stack, effectively clearing its contents. +func (s *sstack) Reset() { + *s = *newSstack(s.setIndex) +} diff --git a/common/types.go b/common/types.go index 4d374ad246b4..71fe5c95cd12 100644 --- a/common/types.go +++ b/common/types.go @@ -17,6 +17,7 @@ package common import ( + "database/sql/driver" "encoding/hex" "encoding/json" "fmt" @@ -31,7 +32,9 @@ import ( // Lengths of hashes and addresses in bytes. const ( - HashLength = 32 + // HashLength is the expected length of the hash + HashLength = 32 + // AddressLength is the expected length of the adddress AddressLength = 20 ) @@ -120,6 +123,24 @@ func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value { return reflect.ValueOf(h) } +// Scan implements Scanner for database/sql. +func (h *Hash) Scan(src interface{}) error { + srcB, ok := src.([]byte) + if !ok { + return fmt.Errorf("can't scan %T into Hash", src) + } + if len(srcB) != HashLength { + return fmt.Errorf("can't scan []byte of len %d into Hash, want %d", len(srcB), HashLength) + } + copy(h[:], srcB) + return nil +} + +// Value implements valuer for database/sql. +func (h Hash) Value() (driver.Value, error) { + return h[:], nil +} + // UnprefixedHash allows marshaling a Hash without 0x prefix. type UnprefixedHash Hash @@ -229,6 +250,24 @@ func (a *Address) UnmarshalJSON(input []byte) error { return hexutil.UnmarshalFixedJSON(addressT, input, a[:]) } +// Scan implements Scanner for database/sql. +func (a *Address) Scan(src interface{}) error { + srcB, ok := src.([]byte) + if !ok { + return fmt.Errorf("can't scan %T into Address", src) + } + if len(srcB) != AddressLength { + return fmt.Errorf("can't scan []byte of len %d into Address, want %d", len(srcB), AddressLength) + } + copy(a[:], srcB) + return nil +} + +// Value implements valuer for database/sql. +func (a Address) Value() (driver.Value, error) { + return a[:], nil +} + // UnprefixedAddress allows marshaling an Address without 0x prefix. 
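// Editor's note: a sketch, with a hypothetical table layout and an assumed *sql.DB, of
// what the Scanner/Valuer implementations above enable:
//
//	var h common.Hash
//	err := db.QueryRow(`SELECT block_hash FROM receipts WHERE id = $1`, id).Scan(&h)
//	// writing uses the Valuer side, which stores the raw 32 bytes
//	_, err = db.Exec(`INSERT INTO blocks (hash) VALUES ($1)`, h)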
type UnprefixedAddress Address diff --git a/common/types_test.go b/common/types_test.go index 9e0c5be3ad2e..7095ccd0191b 100644 --- a/common/types_test.go +++ b/common/types_test.go @@ -17,9 +17,10 @@ package common import ( + "database/sql/driver" "encoding/json" - "math/big" + "reflect" "strings" "testing" ) @@ -193,3 +194,180 @@ func TestMixedcaseAccount_Address(t *testing.T) { } } + +func TestHash_Scan(t *testing.T) { + type args struct { + src interface{} + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "working scan", + args: args{src: []byte{ + 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + 0x10, 0x00, + }}, + wantErr: false, + }, + { + name: "non working scan", + args: args{src: int64(1234567890)}, + wantErr: true, + }, + { + name: "invalid length scan", + args: args{src: []byte{ + 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + }}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := &Hash{} + if err := h.Scan(tt.args.src); (err != nil) != tt.wantErr { + t.Errorf("Hash.Scan() error = %v, wantErr %v", err, tt.wantErr) + } + + if !tt.wantErr { + for i := range h { + if h[i] != tt.args.src.([]byte)[i] { + t.Errorf( + "Hash.Scan() didn't scan the %d src correctly (have %X, want %X)", + i, h[i], tt.args.src.([]byte)[i], + ) + } + } + } + }) + } +} + +func TestHash_Value(t *testing.T) { + b := []byte{ + 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + 0x10, 0x00, + } + var usedH Hash + usedH.SetBytes(b) + tests := []struct { + name string + h Hash + want driver.Value + wantErr bool + }{ + { + name: "Working value", + h: usedH, + want: b, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.h.Value() + if (err != nil) != tt.wantErr { + t.Errorf("Hash.Value() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Hash.Value() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAddress_Scan(t *testing.T) { + type args struct { + src interface{} + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "working scan", + args: args{src: []byte{ + 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + }}, + wantErr: false, + }, + { + name: "non working scan", + args: args{src: int64(1234567890)}, + wantErr: true, + }, + { + name: "invalid length scan", + args: args{src: []byte{ + 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, + }}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + a := &Address{} + if err := a.Scan(tt.args.src); (err != nil) != tt.wantErr { + t.Errorf("Address.Scan() error = %v, wantErr %v", err, tt.wantErr) + } + + if !tt.wantErr { + for i := range a { + if a[i] != tt.args.src.([]byte)[i] { + t.Errorf( + "Address.Scan() didn't scan the %d src correctly (have %X, want %X)", + i, a[i], tt.args.src.([]byte)[i], + ) + } + } + } + }) + } +} + +func TestAddress_Value(t 
*testing.T) { + b := []byte{ + 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + } + var usedA Address + usedA.SetBytes(b) + tests := []struct { + name string + a Address + want driver.Value + wantErr bool + }{ + { + name: "Working value", + a: usedA, + want: b, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.a.Value() + if (err != nil) != tt.wantErr { + t.Errorf("Address.Value() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Address.Value() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/consensus/clique/api.go b/consensus/clique/api.go index b875eef0126a..6bcf987af55e 100644 --- a/consensus/clique/api.go +++ b/consensus/clique/api.go @@ -75,7 +75,7 @@ func (api *API) GetSigners(number *rpc.BlockNumber) ([]common.Address, error) { return snap.signers(), nil } -// GetSignersAtHash retrieves the state snapshot at a given block. +// GetSignersAtHash retrieves the list of authorized signers at the specified block. func (api *API) GetSignersAtHash(hash common.Hash) ([]common.Address, error) { header := api.chain.GetHeaderByHash(hash) if header == nil { diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 01df4d5c78d7..08594470147b 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -53,7 +53,6 @@ const ( // Clique proof-of-authority protocol constants. var ( epochLength = uint64(30000) // Default number of blocks after which to checkpoint and reset the pending votes - blockPeriod = uint64(15) // Default minimum difference between two consecutive block's timestamps extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal @@ -388,22 +387,23 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo break } } - // If we're at block zero, make a snapshot - if number == 0 { - genesis := chain.GetHeaderByNumber(0) - if err := c.VerifyHeader(chain, genesis, false); err != nil { - return nil, err - } - signers := make([]common.Address, (len(genesis.Extra)-extraVanity-extraSeal)/common.AddressLength) - for i := 0; i < len(signers); i++ { - copy(signers[i][:], genesis.Extra[extraVanity+i*common.AddressLength:]) - } - snap = newSnapshot(c.config, c.signatures, 0, genesis.Hash(), signers) - if err := snap.store(c.db); err != nil { - return nil, err + // If we're at an checkpoint block, make a snapshot if it's known + if number%c.config.Epoch == 0 { + checkpoint := chain.GetHeaderByNumber(number) + if checkpoint != nil { + hash := checkpoint.Hash() + + signers := make([]common.Address, (len(checkpoint.Extra)-extraVanity-extraSeal)/common.AddressLength) + for i := 0; i < len(signers); i++ { + copy(signers[i][:], checkpoint.Extra[extraVanity+i*common.AddressLength:]) + } + snap = newSnapshot(c.config, c.signatures, number, hash, signers) + if err := snap.store(c.db); err != nil { + return nil, err + } + log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash) + break } - log.Trace("Stored genesis voting snapshot to disk") - break } // No snapshot for this header, gather the header and move backward var header *types.Header @@ -673,6 +673,11 @@ func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int { return new(big.Int).Set(diffNoTurn) } +// Close implements consensus.Engine. 
It's a noop for clique as there is are no background threads. +func (c *Clique) Close() error { + return nil +} + // APIs implements consensus.Engine, returning the user facing RPC API to allow // controlling the signer voting. func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API { diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go index 9ebdb8df1580..2333d69247b0 100644 --- a/consensus/clique/snapshot.go +++ b/consensus/clique/snapshot.go @@ -19,6 +19,7 @@ package clique import ( "bytes" "encoding/json" + "sort" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -56,6 +57,13 @@ type Snapshot struct { Tally map[common.Address]Tally `json:"tally"` // Current vote tally to avoid recalculating } +// signers implements the sort interface to allow sorting a list of addresses +type signers []common.Address + +func (s signers) Len() int { return len(s) } +func (s signers) Less(i, j int) bool { return bytes.Compare(s[i][:], s[j][:]) < 0 } +func (s signers) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + // newSnapshot creates a new snapshot with the specified startup parameters. This // method does not initialize the set of recent signers, so only ever use if for // the genesis block. @@ -286,18 +294,12 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { // signers retrieves the list of authorized signers in ascending order. func (s *Snapshot) signers() []common.Address { - signers := make([]common.Address, 0, len(s.Signers)) - for signer := range s.Signers { - signers = append(signers, signer) - } - for i := 0; i < len(signers); i++ { - for j := i + 1; j < len(signers); j++ { - if bytes.Compare(signers[i][:], signers[j][:]) > 0 { - signers[i], signers[j] = signers[j], signers[i] - } - } + sigs := make([]common.Address, 0, len(s.Signers)) + for sig := range s.Signers { + sigs = append(sigs, sig) } - return signers + sort.Sort(signers(sigs)) + return sigs } // inturn returns if a signer at a given block height is in-turn or not. diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go index 29a837983208..17719884f0e7 100644 --- a/consensus/clique/snapshot_test.go +++ b/consensus/clique/snapshot_test.go @@ -84,7 +84,7 @@ func (r *testerChainReader) GetHeaderByNumber(number uint64) *types.Header { if number == 0 { return rawdb.ReadHeader(r.db, rawdb.ReadCanonicalHash(r.db, 0), 0) } - panic("not supported") + return nil } // Tests that voting is evaluated correctly for various simple and complex scenarios. @@ -360,7 +360,7 @@ func TestVoting(t *testing.T) { for j, vote := range tt.votes { headers[j] = &types.Header{ Number: big.NewInt(int64(j) + 1), - Time: big.NewInt(int64(j) * int64(blockPeriod)), + Time: big.NewInt(int64(j) * 15), Coinbase: accounts.address(vote.voted), Extra: make([]byte, extraVanity+extraSeal), } diff --git a/consensus/consensus.go b/consensus/consensus.go index 5774af1a78eb..82717544455c 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -96,6 +96,9 @@ type Engine interface { // APIs returns the RPC APIs this consensus engine provides. APIs(chain ChainReader) []rpc.API + + // Close terminates any background threads maintained by the consensus engine. + Close() error } // PoW is a consensus engine based on proof-of-work. 
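Editor's note: with Close added to the Engine interface above, callers are expected to tear
down engines that own background goroutines; clique's implementation is a no-op, while the
ethash one later in this patch stops the remote-sealer loop exactly once. A minimal
caller-side sketch, mirroring the defer pattern the new ethash tests use:

	engine := ethash.NewTester(nil) // any consensus.Engine works the same way
	defer engine.Close()            // harmless for engines without background threads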
diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index fa1c2c8246f2..f252a7f3a305 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go @@ -214,15 +214,6 @@ func swap(buffer []byte) { } } -// prepare converts an ethash cache or dataset from a byte stream into the internal -// int representation. All ethash methods work with ints to avoid constant byte to -// int conversions as well as to handle both little and big endian systems. -func prepare(dest []uint32, src []byte) { - for i := 0; i < len(dest); i++ { - dest[i] = binary.LittleEndian.Uint32(src[i*4:]) - } -} - // fnv is an algorithm inspired by the FNV hash, which in some cases is used as // a non-associative substitute for XOR. Note that we multiply the prime with // the full 32-bit input, in contrast with the FNV-1 spec which multiplies the diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go index 841e39233fa0..db22cccd0c87 100644 --- a/consensus/ethash/algorithm_test.go +++ b/consensus/ethash/algorithm_test.go @@ -18,6 +18,7 @@ package ethash import ( "bytes" + "encoding/binary" "io/ioutil" "math/big" "os" @@ -30,6 +31,15 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) +// prepare converts an ethash cache or dataset from a byte stream into the internal +// int representation. All ethash methods work with ints to avoid constant byte to +// int conversions as well as to handle both little and big endian systems. +func prepare(dest []uint32, src []byte) { + for i := 0; i < len(dest); i++ { + dest[i] = binary.LittleEndian.Uint32(src[i*4:]) + } +} + // Tests whether the dataset size calculator works correctly by cross checking the // hard coded lookup table with the value generated by it. func TestSizeCalculations(t *testing.T) { @@ -719,7 +729,8 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) { go func(idx int) { defer pend.Done() - ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}) + ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}, nil) + defer ethash.Close() if err := ethash.VerifySeal(nil, block.Header()); err != nil { t.Errorf("proc %d: block verification failed: %v", idx, err) } diff --git a/consensus/ethash/api.go b/consensus/ethash/api.go new file mode 100644 index 000000000000..a04ea235d925 --- /dev/null +++ b/consensus/ethash/api.go @@ -0,0 +1,117 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethash + +import ( + "errors" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +var errEthashStopped = errors.New("ethash stopped") + +// API exposes ethash related methods for the RPC interface. 
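// Editor's note: a sketch of how an external miner is expected to drive this API over
// JSON-RPC once it is registered under the "eth" and "ethash" namespaces later in this
// patch; the endpoint URL and the nonce/mix-digest search are placeholders:
//
//	client, _ := rpc.Dial("http://127.0.0.1:8545")
//	var work [3]string
//	client.Call(&work, "eth_getWork") // pow-hash, seed hash, target
//	// ... grind nonces until the hashimoto result is below the target in work[2] ...
//	var accepted bool
//	client.Call(&accepted, "eth_submitWork", nonce, work[0], mixDigest)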
+type API struct { + ethash *Ethash // Make sure the mode of ethash is normal. +} + +// GetWork returns a work package for external miner. +// +// The work package consists of 3 strings: +// result[0] - 32 bytes hex encoded current block header pow-hash +// result[1] - 32 bytes hex encoded seed hash used for DAG +// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty +func (api *API) GetWork() ([3]string, error) { + if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest { + return [3]string{}, errors.New("not supported") + } + + var ( + workCh = make(chan [3]string, 1) + errc = make(chan error, 1) + ) + + select { + case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}: + case <-api.ethash.exitCh: + return [3]string{}, errEthashStopped + } + + select { + case work := <-workCh: + return work, nil + case err := <-errc: + return [3]string{}, err + } +} + +// SubmitWork can be used by external miner to submit their POW solution. +// It returns an indication if the work was accepted. +// Note either an invalid solution, a stale work a non-existent work will return false. +func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool { + if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest { + return false + } + + var errc = make(chan error, 1) + + select { + case api.ethash.submitWorkCh <- &mineResult{ + nonce: nonce, + mixDigest: digest, + hash: hash, + errc: errc, + }: + case <-api.ethash.exitCh: + return false + } + + err := <-errc + return err == nil +} + +// SubmitHashrate can be used for remote miners to submit their hash rate. +// This enables the node to report the combined hash rate of all miners +// which submit work through this node. +// +// It accepts the miner hash rate and an identifier which must be unique +// between nodes. +func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool { + if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest { + return false + } + + var done = make(chan struct{}, 1) + + select { + case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}: + case <-api.ethash.exitCh: + return false + } + + // Block until hash rate submitted successfully. + <-done + + return true +} + +// GetHashrate returns the current hashrate for local CPU miner and remote miner. +func (api *API) GetHashrate() uint64 { + return uint64(api.ethash.Hashrate()) +} diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 99eec82211e2..86fd997ae440 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -24,6 +24,7 @@ import ( "runtime" "time" + mapset "github.com/deckarep/golang-set" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus" @@ -31,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" - set "gopkg.in/fatih/set.v0" ) // Ethash proof-of-work protocol constants. 
@@ -177,7 +177,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo return errTooManyUncles } // Gather the set of past uncles and ancestors - uncles, ancestors := set.New(), make(map[common.Hash]*types.Header) + uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.Header) number, parent := block.NumberU64()-1, block.ParentHash() for i := 0; i < 7; i++ { @@ -198,7 +198,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo for _, uncle := range block.Uncles() { // Make sure every uncle is rewarded only once hash := uncle.Hash() - if uncles.Has(hash) { + if uncles.Contains(hash) { return errDuplicateUncle } uncles.Add(hash) @@ -356,8 +356,8 @@ func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int { x.Set(params.MinimumDifficulty) } // calculate a fake block number for the ice-age delay: - // https://github.com/ethereum/EIPs/pull/669 - // fake_block_number = min(0, block.number - 3_000_000 + // https://github.com/ethereum/EIPs/pull/669 + // fake_block_number = max(0, block.number - 3_000_000) fakeBlockNumber := new(big.Int) if parent.Number.Cmp(big2999999) >= 0 { fakeBlockNumber = fakeBlockNumber.Sub(parent.Number, big2999999) // Note, parent is 1 less than the actual block number @@ -461,6 +461,13 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int { // VerifySeal implements consensus.Engine, checking whether the given block satisfies // the PoW difficulty requirements. func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error { + return ethash.verifySeal(chain, header, false) +} + +// verifySeal checks whether a block satisfies the PoW difficulty requirements, +// either using the usual ethash cache for it, or alternatively using a full DAG +// to make remote mining fast. +func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error { // If we're running a fake PoW, accept any seal as valid if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake { time.Sleep(ethash.fakeDelay) @@ -471,29 +478,52 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head } // If we're running a shared PoW, delegate verification to it if ethash.shared != nil { - return ethash.shared.VerifySeal(chain, header) + return ethash.shared.verifySeal(chain, header, fulldag) } // Ensure that we have a valid difficulty for the block if header.Difficulty.Sign() <= 0 { return errInvalidDifficulty } - // Recompute the digest and PoW value and verify against the header + // Recompute the digest and PoW values number := header.Number.Uint64() - cache := ethash.cache(number) - size := datasetSize(number) - if ethash.config.PowMode == ModeTest { - size = 32 * 1024 + var ( + digest []byte + result []byte + ) + // If fast-but-heavy PoW verification was requested, use an ethash dataset + if fulldag { + dataset := ethash.dataset(number, true) + if dataset.generated() { + digest, result = hashimotoFull(dataset.dataset, header.HashNoNonce().Bytes(), header.Nonce.Uint64()) + + // Datasets are unmapped in a finalizer. Ensure that the dataset stays alive + // until after the call to hashimotoFull so it's not unmapped while being used. 
+ runtime.KeepAlive(dataset) + } else { + // Dataset not yet generated, don't hang, use a cache instead + fulldag = false + } } - digest, result := hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64()) - // Caches are unmapped in a finalizer. Ensure that the cache stays live - // until after the call to hashimotoLight so it's not unmapped while being used. - runtime.KeepAlive(cache) + // If slow-but-light PoW verification was requested (or DAG not yet ready), use an ethash cache + if !fulldag { + cache := ethash.cache(number) + + size := datasetSize(number) + if ethash.config.PowMode == ModeTest { + size = 32 * 1024 + } + digest, result = hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64()) + // Caches are unmapped in a finalizer. Ensure that the cache stays alive + // until after the call to hashimotoLight so it's not unmapped while being used. + runtime.KeepAlive(cache) + } + // Verify the calculated values against the ones provided in the header if !bytes.Equal(header.MixDigest[:], digest) { return errInvalidMixDigest } - target := new(big.Int).Div(maxUint256, header.Difficulty) + target := new(big.Int).Div(two256, header.Difficulty) if new(big.Int).SetBytes(result).Cmp(target) > 0 { return errInvalidPoW } diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index f79dd6c36b94..d98c3371c560 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -29,11 +29,14 @@ import ( "runtime" "strconv" "sync" + "sync/atomic" "time" "unsafe" mmap "github.com/edsrzf/mmap-go" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rpc" @@ -43,11 +46,11 @@ import ( var ErrInvalidDumpMagic = errors.New("invalid dump magic") var ( - // maxUint256 is a big integer representing 2^256-1 - maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) + // two256 is a big integer representing 2^256 + two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // sharedEthash is a full instance that can be shared between multiple users. - sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}) + sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil) // algorithmRevision is the data structure version used for file naming. algorithmRevision = 23 @@ -279,6 +282,7 @@ type dataset struct { mmap mmap.MMap // Memory map itself to unmap before releasing dataset []uint32 // The actual cache data content once sync.Once // Ensures the cache is generated only once + done uint32 // Atomic flag to determine generation status } // newDataset creates a new ethash mining dataset and returns it as a plain Go @@ -290,6 +294,9 @@ func newDataset(epoch uint64) interface{} { // generate ensures that the dataset content is generated before use. func (d *dataset) generate(dir string, limit int, test bool) { d.once.Do(func() { + // Mark the dataset generated after we're done. 
This is needed for remote + defer atomic.StoreUint32(&d.done, 1) + csize := cacheSize(d.epoch*epochLength + 1) dsize := datasetSize(d.epoch*epochLength + 1) seed := seedHash(d.epoch*epochLength + 1) @@ -304,6 +311,8 @@ func (d *dataset) generate(dir string, limit int, test bool) { d.dataset = make([]uint32, dsize/4) generateDataset(d.dataset, d.epoch, cache) + + return } // Disk storage is needed, this will get fancy var endian string @@ -346,6 +355,13 @@ func (d *dataset) generate(dir string, limit int, test bool) { }) } +// generated returns whether this particular dataset finished generating already +// or not (it may not have been started at all). This is useful for remote miners +// to default to verification caches instead of blocking on DAG generations. +func (d *dataset) generated() bool { + return atomic.LoadUint32(&d.done) == 1 +} + // finalizer closes any file handlers and memory maps open. func (d *dataset) finalizer() { if d.mmap != nil { @@ -389,6 +405,30 @@ type Config struct { PowMode Mode } +// mineResult wraps the pow solution parameters for the specified block. +type mineResult struct { + nonce types.BlockNonce + mixDigest common.Hash + hash common.Hash + + errc chan error +} + +// hashrate wraps the hash rate submitted by the remote sealer. +type hashrate struct { + id common.Hash + ping time.Time + rate uint64 + + done chan struct{} +} + +// sealWork wraps a seal work package for remote sealer. +type sealWork struct { + errc chan error + res chan [3]string +} + // Ethash is a consensus engine based on proof-of-work implementing the ethash // algorithm. type Ethash struct { @@ -403,16 +443,28 @@ type Ethash struct { update chan struct{} // Notification channel to update mining parameters hashrate metrics.Meter // Meter tracking the average hashrate + // Remote sealer related fields + workCh chan *types.Block // Notification channel to push new work to remote sealer + resultCh chan *types.Block // Channel used by mining threads to return result + fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work + submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result + fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer. + submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate + // The fields below are hooks for testing shared *Ethash // Shared PoW verifier to avoid cache regeneration fakeFail uint64 // Block number which fails PoW check even in fake mode fakeDelay time.Duration // Time delay to sleep for before returning from verify - lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields + lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields + closeOnce sync.Once // Ensures exit channel will not be closed twice. + exitCh chan chan error // Notification channel to exiting backend threads } -// New creates a full sized ethash PoW scheme. -func New(config Config) *Ethash { +// New creates a full sized ethash PoW scheme and starts a background thread for +// remote mining, also optionally notifying a batch of remote services of new work +// packages. 
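// Editor's note: the new constructor signature in use; the notification URL below is
// hypothetical and stands in for whatever --miner.notify passes down from the CLI:
//
//	engine := ethash.New(ethash.Config{CacheDir: "ethash", CachesInMem: 2},
//		[]string{"http://remote-miner.example:8550"})
//	defer engine.Close() // stops the remote sealer goroutine started by New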
+func New(config Config, notify []string) *Ethash { if config.CachesInMem <= 0 { log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) config.CachesInMem = 1 @@ -423,19 +475,43 @@ func New(config Config) *Ethash { if config.DatasetDir != "" && config.DatasetsOnDisk > 0 { log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk) } - return &Ethash{ - config: config, - caches: newlru("cache", config.CachesInMem, newCache), - datasets: newlru("dataset", config.DatasetsInMem, newDataset), - update: make(chan struct{}), - hashrate: metrics.NewMeter(), + ethash := &Ethash{ + config: config, + caches: newlru("cache", config.CachesInMem, newCache), + datasets: newlru("dataset", config.DatasetsInMem, newDataset), + update: make(chan struct{}), + hashrate: metrics.NewMeter(), + workCh: make(chan *types.Block), + resultCh: make(chan *types.Block), + fetchWorkCh: make(chan *sealWork), + submitWorkCh: make(chan *mineResult), + fetchRateCh: make(chan chan uint64), + submitRateCh: make(chan *hashrate), + exitCh: make(chan chan error), } + go ethash.remote(notify) + return ethash } // NewTester creates a small sized ethash PoW scheme useful only for testing // purposes. -func NewTester() *Ethash { - return New(Config{CachesInMem: 1, PowMode: ModeTest}) +func NewTester(notify []string) *Ethash { + ethash := &Ethash{ + config: Config{PowMode: ModeTest}, + caches: newlru("cache", 1, newCache), + datasets: newlru("dataset", 1, newDataset), + update: make(chan struct{}), + hashrate: metrics.NewMeter(), + workCh: make(chan *types.Block), + resultCh: make(chan *types.Block), + fetchWorkCh: make(chan *sealWork), + submitWorkCh: make(chan *mineResult), + fetchRateCh: make(chan chan uint64), + submitRateCh: make(chan *hashrate), + exitCh: make(chan chan error), + } + go ethash.remote(notify) + return ethash } // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts @@ -489,6 +565,22 @@ func NewShared() *Ethash { return &Ethash{shared: sharedEthash} } +// Close closes the exit channel to notify all backend threads exiting. +func (ethash *Ethash) Close() error { + var err error + ethash.closeOnce.Do(func() { + // Short circuit if the exit channel is not allocated. + if ethash.exitCh == nil { + return + } + errc := make(chan error) + ethash.exitCh <- errc + err = <-errc + close(ethash.exitCh) + }) + return err +} + // cache tries to retrieve a verification cache for the specified block number // by first checking against a list of in-memory caches, then against caches // stored on disk, and finally generating one if none can be found. @@ -511,20 +603,34 @@ func (ethash *Ethash) cache(block uint64) *cache { // dataset tries to retrieve a mining dataset for the specified block number // by first checking against a list of in-memory datasets, then against DAGs // stored on disk, and finally generating one if none can be found. -func (ethash *Ethash) dataset(block uint64) *dataset { +// +// If async is specified, not only the future but the current DAG is also +// generates on a background thread. +func (ethash *Ethash) dataset(block uint64, async bool) *dataset { + // Retrieve the requested ethash dataset epoch := block / epochLength currentI, futureI := ethash.datasets.get(epoch) current := currentI.(*dataset) - // Wait for generation finish. 
- current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) - - // If we need a new future dataset, now's a good time to regenerate it. - if futureI != nil { - future := futureI.(*dataset) - go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + // If async is specified, generate everything in a background thread + if async && !current.generated() { + go func() { + current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + + if futureI != nil { + future := futureI.(*dataset) + future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + } + }() + } else { + // Either blocking generation was requested, or already done + current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + + if futureI != nil { + future := futureI.(*dataset) + go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + } } - return current } @@ -561,14 +667,44 @@ func (ethash *Ethash) SetThreads(threads int) { // Hashrate implements PoW, returning the measured rate of the search invocations // per second over the last minute. +// Note the returned hashrate includes local hashrate, but also includes the total +// hashrate of all remote miner. func (ethash *Ethash) Hashrate() float64 { - return ethash.hashrate.Rate1() + // Short circuit if we are run the ethash in normal/test mode. + if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest { + return ethash.hashrate.Rate1() + } + var res = make(chan uint64, 1) + + select { + case ethash.fetchRateCh <- res: + case <-ethash.exitCh: + // Return local hashrate only if ethash is stopped. + return ethash.hashrate.Rate1() + } + + // Gather total submitted hash rate of remote sealers. + return ethash.hashrate.Rate1() + float64(<-res) } -// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently -// that is empty. +// APIs implements consensus.Engine, returning the user facing RPC APIs. func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API { - return nil + // In order to ensure backward compatibility, we exposes ethash RPC APIs + // to both eth and ethash namespaces. + return []rpc.API{ + { + Namespace: "eth", + Version: "1.0", + Service: &API{ethash}, + Public: true, + }, + { + Namespace: "ethash", + Version: "1.0", + Service: &API{ethash}, + Public: true, + }, + } } // SeedHash is the seed to use for generating a verification cache and the mining diff --git a/consensus/ethash/ethash_test.go b/consensus/ethash/ethash_test.go index 31116da437dd..87ac17c2b203 100644 --- a/consensus/ethash/ethash_test.go +++ b/consensus/ethash/ethash_test.go @@ -23,22 +23,27 @@ import ( "os" "sync" "testing" + "time" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" ) // Tests that ethash works correctly in test mode. 
func TestTestMode(t *testing.T) { - head := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} + header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} - ethash := NewTester() - block, err := ethash.Seal(nil, types.NewBlockWithHeader(head), nil) + ethash := NewTester(nil) + defer ethash.Close() + + block, err := ethash.Seal(nil, types.NewBlockWithHeader(header), nil) if err != nil { t.Fatalf("failed to seal block: %v", err) } - head.Nonce = types.EncodeNonce(block.Nonce()) - head.MixDigest = block.MixDigest() - if err := ethash.VerifySeal(nil, head); err != nil { + header.Nonce = types.EncodeNonce(block.Nonce()) + header.MixDigest = block.MixDigest() + if err := ethash.VerifySeal(nil, header); err != nil { t.Fatalf("unexpected verification error: %v", err) } } @@ -51,7 +56,8 @@ func TestCacheFileEvict(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(tmpdir) - e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest}) + e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest}, nil) + defer e.Close() workers := 8 epochs := 100 @@ -73,7 +79,90 @@ func verifyTest(wg *sync.WaitGroup, e *Ethash, workerIndex, epochs int) { if block < 0 { block = 0 } - head := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)} - e.VerifySeal(nil, head) + header := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)} + e.VerifySeal(nil, header) + } +} + +func TestRemoteSealer(t *testing.T) { + ethash := NewTester(nil) + defer ethash.Close() + + api := &API{ethash} + if _, err := api.GetWork(); err != errNoMiningWork { + t.Error("expect to return an error indicate there is no mining work") + } + header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + + // Push new work. + ethash.Seal(nil, block, nil) + + var ( + work [3]string + err error + ) + if work, err = api.GetWork(); err != nil || work[0] != block.HashNoNonce().Hex() { + t.Error("expect to return a mining work has same hash") + } + + if res := api.SubmitWork(types.BlockNonce{}, block.HashNoNonce(), common.Hash{}); res { + t.Error("expect to return false when submit a fake solution") + } + // Push new block with same block number to replace the original one. + header = &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1000)} + block = types.NewBlockWithHeader(header) + ethash.Seal(nil, block, nil) + + if work, err = api.GetWork(); err != nil || work[0] != block.HashNoNonce().Hex() { + t.Error("expect to return the latest pushed work") + } + // Push block with higher block number. 
+ newHead := &types.Header{Number: big.NewInt(2), Difficulty: big.NewInt(100)} + newBlock := types.NewBlockWithHeader(newHead) + ethash.Seal(nil, newBlock, nil) + + if res := api.SubmitWork(types.BlockNonce{}, block.HashNoNonce(), common.Hash{}); res { + t.Error("expect to return false when submit a stale solution") + } +} + +func TestHashRate(t *testing.T) { + var ( + hashrate = []hexutil.Uint64{100, 200, 300} + expect uint64 + ids = []common.Hash{common.HexToHash("a"), common.HexToHash("b"), common.HexToHash("c")} + ) + ethash := NewTester(nil) + defer ethash.Close() + + if tot := ethash.Hashrate(); tot != 0 { + t.Error("expect the result should be zero") + } + + api := &API{ethash} + for i := 0; i < len(hashrate); i += 1 { + if res := api.SubmitHashRate(hashrate[i], ids[i]); !res { + t.Error("remote miner submit hashrate failed") + } + expect += uint64(hashrate[i]) + } + if tot := ethash.Hashrate(); tot != float64(expect) { + t.Error("expect total hashrate should be same") + } +} + +func TestClosedRemoteSealer(t *testing.T) { + ethash := NewTester(nil) + time.Sleep(1 * time.Second) // ensure exit channel is listening + ethash.Close() + + api := &API{ethash} + if _, err := api.GetWork(); err != errEthashStopped { + t.Error("expect to return an error to indicate ethash is stopped") + } + + if res := api.SubmitHashRate(hexutil.Uint64(100), common.HexToHash("a")); res { + t.Error("expect to return false when submit hashrate to a stopped ethash") } } diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index b5e742d8bbba..c3b2c86d1202 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -17,12 +17,17 @@ package ethash import ( + "bytes" crand "crypto/rand" + "encoding/json" + "errors" "math" "math/big" "math/rand" + "net/http" "runtime" "sync" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" @@ -30,6 +35,11 @@ import ( "github.com/ethereum/go-ethereum/log" ) +var ( + errNoMiningWork = errors.New("no mining work available yet") + errInvalidSealResult = errors.New("invalid or stale proof-of-work solution") +) + // Seal implements consensus.Engine, attempting to find a nonce that satisfies // the block's difficulty requirements. 
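For context on what the tests above exercise: with the API now exposed in the eth namespace, an external miner drives the same flow over JSON-RPC via eth_getWork and eth_submitWork. A rough client-side sketch using the go-ethereum rpc package is below; the endpoint URL and the fake nonce/mix digest are placeholders, and a real node would (correctly) reject this submission as an invalid solution.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; point this at a node exposing the miner APIs.
	client, err := rpc.Dial("http://127.0.0.1:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Fetch the current work package: [pow-hash, seed-hash, boundary target].
	var work [3]string
	if err := client.Call(&work, "eth_getWork"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("hash:", work[0], "seed:", work[1], "target:", work[2])

	// After searching for a nonce off-node, hand the solution back.
	// These values are fabricated, so the node will report it as rejected.
	var accepted bool
	nonce := "0x0000000000000001"
	mixDigest := "0x" + fmt.Sprintf("%064x", 0)
	if err := client.Call(&accepted, "eth_submitWork", nonce, work[0], mixDigest); err != nil {
		log.Fatal(err)
	}
	fmt.Println("accepted:", accepted)
}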
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) { @@ -45,7 +55,6 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop } // Create a runner and the multiple search threads it directs abort := make(chan struct{}) - found := make(chan *types.Block) ethash.lock.Lock() threads := ethash.threads @@ -64,12 +73,16 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop if threads < 0 { threads = 0 // Allows disabling local mining without extra logic around local/remote } + // Push new work to remote sealer + if ethash.workCh != nil { + ethash.workCh <- block + } var pend sync.WaitGroup for i := 0; i < threads; i++ { pend.Add(1) go func(id int, nonce uint64) { defer pend.Done() - ethash.mine(block, id, nonce, abort, found) + ethash.mine(block, id, nonce, abort, ethash.resultCh) }(i, uint64(ethash.rand.Int63())) } // Wait until sealing is terminated or a nonce is found @@ -78,7 +91,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop case <-stop: // Outside abort, stop all miner threads close(abort) - case result = <-found: + case result = <-ethash.resultCh: // One of the threads found a block, abort all others close(abort) case <-ethash.update: @@ -99,9 +112,9 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s var ( header = block.Header() hash = header.HashNoNonce().Bytes() - target = new(big.Int).Div(maxUint256, header.Difficulty) + target = new(big.Int).Div(two256, header.Difficulty) number = header.Number.Uint64() - dataset = ethash.dataset(number) + dataset = ethash.dataset(number, false) ) // Start generating random nonces until we abort or find a good one var ( @@ -150,3 +163,164 @@ search: // during sealing so it's not unmapped while being read. runtime.KeepAlive(dataset) } + +// remote is a standalone goroutine to handle remote mining related stuff. +func (ethash *Ethash) remote(notify []string) { + var ( + works = make(map[common.Hash]*types.Block) + rates = make(map[common.Hash]hashrate) + + currentBlock *types.Block + currentWork [3]string + + notifyTransport = &http.Transport{} + notifyClient = &http.Client{ + Transport: notifyTransport, + Timeout: time.Second, + } + notifyReqs = make([]*http.Request, len(notify)) + ) + // notifyWork notifies all the specified mining endpoints of the availability of + // new work to be processed. + notifyWork := func() { + work := currentWork + blob, _ := json.Marshal(work) + + for i, url := range notify { + // Terminate any previously pending request and create the new work + if notifyReqs[i] != nil { + notifyTransport.CancelRequest(notifyReqs[i]) + } + notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob)) + notifyReqs[i].Header.Set("Content-Type", "application/json") + + // Push the new work concurrently to all the remote nodes + go func(req *http.Request, url string) { + res, err := notifyClient.Do(req) + if err != nil { + log.Warn("Failed to notify remote miner", "err", err) + } else { + log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2]) + res.Body.Close() + } + }(notifyReqs[i], url) + } + } + // makeWork creates a work package for external miner. 
+ // + // The work package consists of 3 strings: + // result[0], 32 bytes hex encoded current block header pow-hash + // result[1], 32 bytes hex encoded seed hash used for DAG + // result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty + makeWork := func(block *types.Block) { + hash := block.HashNoNonce() + + currentWork[0] = hash.Hex() + currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex() + currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex() + + // Trace the seal work fetched by remote sealer. + currentBlock = block + works[hash] = block + } + // submitWork verifies the submitted pow solution, returning + // whether the solution was accepted or not (not can be both a bad pow as well as + // any other error, like no pending work or stale mining result). + submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, hash common.Hash) bool { + // Make sure the work submitted is present + block := works[hash] + if block == nil { + log.Info("Work submitted but none pending", "hash", hash) + return false + } + // Verify the correctness of submitted result. + header := block.Header() + header.Nonce = nonce + header.MixDigest = mixDigest + + start := time.Now() + if err := ethash.verifySeal(nil, header, true); err != nil { + log.Warn("Invalid proof-of-work submitted", "hash", hash, "elapsed", time.Since(start), "err", err) + return false + } + // Make sure the result channel is created. + if ethash.resultCh == nil { + log.Warn("Ethash result channel is empty, submitted mining result is rejected") + return false + } + log.Trace("Verified correct proof-of-work", "hash", hash, "elapsed", time.Since(start)) + + // Solutions seems to be valid, return to the miner and notify acceptance. + select { + case ethash.resultCh <- block.WithSeal(header): + delete(works, hash) + return true + default: + log.Info("Work submitted is stale", "hash", hash) + return false + } + } + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + for { + select { + case block := <-ethash.workCh: + if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() { + // Start new round mining, throw out all previous work. + works = make(map[common.Hash]*types.Block) + } + // Update current work with new received block. + // Note same work can be past twice, happens when changing CPU threads. + makeWork(block) + + // Notify and requested URLs of the new work availability + notifyWork() + + case work := <-ethash.fetchWorkCh: + // Return current mining work to remote miner. + if currentBlock == nil { + work.errc <- errNoMiningWork + } else { + work.res <- currentWork + } + + case result := <-ethash.submitWorkCh: + // Verify submitted PoW solution based on maintained mining blocks. + if submitWork(result.nonce, result.mixDigest, result.hash) { + result.errc <- nil + } else { + result.errc <- errInvalidSealResult + } + + case result := <-ethash.submitRateCh: + // Trace remote sealer's hash rate by submitted value. + rates[result.id] = hashrate{rate: result.rate, ping: time.Now()} + close(result.done) + + case req := <-ethash.fetchRateCh: + // Gather all hash rate submitted by remote sealer. + var total uint64 + for _, rate := range rates { + // this could overflow + total += rate.rate + } + req <- total + + case <-ticker.C: + // Clear stale submitted hash rate. 
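Of the three work-package fields described above, the only computed one is the boundary: 2^256 divided by the block difficulty, hex encoded as a 32-byte hash. A quick self-contained illustration of that target computation, using the same big.Int arithmetic the patch relies on:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	difficulty := big.NewInt(100)

	// two256 in the engine is simply 1 << 256; a valid PoW must stay under
	// two256 / difficulty.
	two256 := new(big.Int).Lsh(big.NewInt(1), 256)
	target := new(big.Int).Div(two256, difficulty)

	// Encoded exactly like result[2] of the work package: left-padded to 32 bytes.
	fmt.Println(common.BytesToHash(target.Bytes()).Hex())
}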
+ for id, rate := range rates { + if time.Since(rate.ping) > 10*time.Second { + delete(rates, id) + } + } + + case errc := <-ethash.exitCh: + // Exit remote loop if ethash is closed and return relevant error. + errc <- nil + log.Trace("Ethash remote sealer is exiting") + return + } + } +} diff --git a/consensus/ethash/sealer_test.go b/consensus/ethash/sealer_test.go new file mode 100644 index 000000000000..6c7157a5aeeb --- /dev/null +++ b/consensus/ethash/sealer_test.go @@ -0,0 +1,115 @@ +package ethash + +import ( + "encoding/json" + "io/ioutil" + "math/big" + "net" + "net/http" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// Tests whether remote HTTP servers are correctly notified of new work. +func TestRemoteNotify(t *testing.T) { + // Start a simple webserver to capture notifications + sink := make(chan [3]string) + + server := &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + blob, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatalf("failed to read miner notification: %v", err) + } + var work [3]string + if err := json.Unmarshal(blob, &work); err != nil { + t.Fatalf("failed to unmarshal miner notification: %v", err) + } + sink <- work + }), + } + // Open a custom listener to extract its local address + listener, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to open notification server: %v", err) + } + defer listener.Close() + + go server.Serve(listener) + + // Create the custom ethash engine + ethash := NewTester([]string{"http://" + listener.Addr().String()}) + defer ethash.Close() + + // Stream a work task and ensure the notification bubbles out + header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + + ethash.Seal(nil, block, nil) + select { + case work := <-sink: + if want := header.HashNoNonce().Hex(); work[0] != want { + t.Errorf("work packet hash mismatch: have %s, want %s", work[0], want) + } + if want := common.BytesToHash(SeedHash(header.Number.Uint64())).Hex(); work[1] != want { + t.Errorf("work packet seed mismatch: have %s, want %s", work[1], want) + } + target := new(big.Int).Div(new(big.Int).Lsh(big.NewInt(1), 256), header.Difficulty) + if want := common.BytesToHash(target.Bytes()).Hex(); work[2] != want { + t.Errorf("work packet target mismatch: have %s, want %s", work[2], want) + } + case <-time.After(time.Second): + t.Fatalf("notification timed out") + } +} + +// Tests that pushing work packages fast to the miner doesn't cause any daa race +// issues in the notifications. 
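The tests below stand up a throwaway HTTP listener to receive the work notifications that the remote loop POSTs. For reference, a stripped-down receiver outside the test harness would look roughly like this; the listen address is arbitrary and the handler only logs what it gets.

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	// Each notification body is the JSON-encoded work package:
	// [pow-hash, seed-hash, boundary target].
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		var work [3]string
		if err := json.NewDecoder(r.Body).Decode(&work); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("new work: hash=%s seed=%s target=%s", work[0], work[1], work[2])
	})
	// Arbitrary local port, for illustration only.
	log.Fatal(http.ListenAndServe("127.0.0.1:8550", nil))
}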
+func TestRemoteMultiNotify(t *testing.T) { + // Start a simple webserver to capture notifications + sink := make(chan [3]string, 64) + + server := &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + blob, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatalf("failed to read miner notification: %v", err) + } + var work [3]string + if err := json.Unmarshal(blob, &work); err != nil { + t.Fatalf("failed to unmarshal miner notification: %v", err) + } + sink <- work + }), + } + // Open a custom listener to extract its local address + listener, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to open notification server: %v", err) + } + defer listener.Close() + + go server.Serve(listener) + + // Create the custom ethash engine + ethash := NewTester([]string{"http://" + listener.Addr().String()}) + defer ethash.Close() + + // Stream a lot of work task and ensure all the notifications bubble out + for i := 0; i < cap(sink); i++ { + header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + + ethash.Seal(nil, block, nil) + } + for i := 0; i < cap(sink); i++ { + select { + case <-sink: + case <-time.After(250 * time.Millisecond): + t.Fatalf("notification %d timed out", i) + } + } +} diff --git a/console/console.go b/console/console.go index 56e03837ac44..3c397f800614 100644 --- a/console/console.go +++ b/console/console.go @@ -314,7 +314,7 @@ func (c *Console) Interactive() { input = "" // Current user input scheduler = make(chan string) // Channel to send the next prompt on and receive the input ) - // Start a goroutine to listen for promt requests and send back inputs + // Start a goroutine to listen for prompt requests and send back inputs go func() { for { // Read the next user input diff --git a/console/console_test.go b/console/console_test.go index 7b1629c032a1..26465ca6f450 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -201,7 +201,7 @@ func TestInteractive(t *testing.T) { go tester.console.Interactive() - // Wait for a promt and send a statement back + // Wait for a prompt and send a statement back select { case <-tester.input.scheduler: case <-time.After(time.Second): @@ -212,7 +212,7 @@ func TestInteractive(t *testing.T) { case <-time.After(time.Second): t.Fatalf("input feedback timeout") } - // Wait for the second promt and ensure first statement was evaluated + // Wait for the second prompt and ensure first statement was evaluated select { case <-tester.input.scheduler: case <-time.After(time.Second): @@ -249,7 +249,7 @@ func TestExecute(t *testing.T) { } // Tests that the JavaScript objects returned by statement executions are properly -// pretty printed instead of just displaing "[object]". +// pretty printed instead of just displaying "[object]". func TestPrettyPrint(t *testing.T) { tester := newTester(t, nil) defer tester.Close(t) @@ -300,7 +300,7 @@ func TestIndenting(t *testing.T) { }{ {`var a = 1;`, 0}, {`"some string"`, 0}, - {`"some string with (parentesis`, 0}, + {`"some string with (parenthesis`, 0}, {`"some string with newline ("`, 0}, {`function v(a,b) {}`, 0}, diff --git a/console/prompter.go b/console/prompter.go index c477b48178b3..9b90034db904 100644 --- a/console/prompter.go +++ b/console/prompter.go @@ -27,7 +27,7 @@ import ( // Only this reader may be used for input because it keeps an internal buffer. 
var Stdin = newTerminalPrompter() -// UserPrompter defines the methods needed by the console to promt the user for +// UserPrompter defines the methods needed by the console to prompt the user for // various types of inputs. type UserPrompter interface { // PromptInput displays the given prompt to the user and requests some textual diff --git a/contracts/chequebook/cheque_test.go b/contracts/chequebook/cheque_test.go index 6b6b28e6577b..4bd2e176b14f 100644 --- a/contracts/chequebook/cheque_test.go +++ b/contracts/chequebook/cheque_test.go @@ -46,7 +46,7 @@ func newTestBackend() *backends.SimulatedBackend { addr0: {Balance: big.NewInt(1000000000)}, addr1: {Balance: big.NewInt(1000000000)}, addr2: {Balance: big.NewInt(1000000000)}, - }) + }, 10000000) } func deploy(prvKey *ecdsa.PrivateKey, amount *big.Int, backend *backends.SimulatedBackend) (common.Address, error) { diff --git a/contracts/chequebook/gencode.go b/contracts/chequebook/gencode.go index 45f6d68f3eb9..ddfe8d151274 100644 --- a/contracts/chequebook/gencode.go +++ b/contracts/chequebook/gencode.go @@ -40,7 +40,7 @@ var ( ) func main() { - backend := backends.NewSimulatedBackend(testAlloc) + backend := backends.NewSimulatedBackend(testAlloc, uint64(100000000)) auth := bind.NewKeyedTransactor(testKey) // Deploy the contract, get the code. diff --git a/contracts/ens/ens_test.go b/contracts/ens/ens_test.go index 6ad844708202..411b04197cd2 100644 --- a/contracts/ens/ens_test.go +++ b/contracts/ens/ens_test.go @@ -35,7 +35,7 @@ var ( ) func TestENS(t *testing.T) { - contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}) + contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) transactOpts := bind.NewKeyedTransactor(key) ensAddr, ens, err := DeployENS(transactOpts, contractBackend) diff --git a/core/blockchain.go b/core/blockchain.go index 2f1e78423e4b..0461da7fd937 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -450,15 +450,19 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { } log.Info("Exporting batch of blocks", "count", last-first+1) + start, reported := time.Now(), time.Now() for nr := first; nr <= last; nr++ { block := bc.GetBlockByNumber(nr) if block == nil { return fmt.Errorf("export failed on #%d: not found", nr) } - if err := block.EncodeRLP(w); err != nil { return err } + if time.Since(reported) >= statsReportLimit { + log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start))) + reported = time.Now() + } } return nil @@ -895,9 +899,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil { return NonStatTy, err } - // Write other block data using a batch. - batch := bc.db.NewBatch() - rawdb.WriteBlock(batch, block) + rawdb.WriteBlock(bc.db, block) root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) if err != nil { @@ -951,6 +953,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. } } } + + // Write other block data using a batch. 
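The ExportN change earlier in this hunk is the usual throttled-progress pattern: remember when the last report went out and only log again once statsReportLimit has elapsed. In isolation the pattern looks roughly like the sketch below; the function names and plain fmt output are stand-ins, not the blockchain code itself.

package main

import (
	"fmt"
	"time"
)

const statsReportLimit = 8 * time.Second

// exportBlocks runs a long loop and reports progress at most once per
// statsReportLimit, so quiet exports stay quiet but long ones stay visible.
func exportBlocks(first, last uint64, export func(uint64) error) error {
	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		if err := export(nr); err != nil {
			return err
		}
		if time.Since(reported) >= statsReportLimit {
			fmt.Printf("exported %d blocks in %v\n", nr-first+1, time.Since(start))
			reported = time.Now()
		}
	}
	return nil
}

func main() {
	_ = exportBlocks(0, 10, func(nr uint64) error {
		time.Sleep(time.Millisecond) // stand-in for encoding one block
		return nil
	})
	fmt.Println("done")
}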
+ batch := bc.db.NewBatch() rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) // If the total difficulty is higher than our known, add it to the canonical chain @@ -1012,7 +1017,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty // Do a sanity check that the provided chain is actually ordered and linked for i := 1; i < len(chain); i++ { if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() { - // Chain broke ancestry, log a messge (programming error) and skip insertion + // Chain broke ancestry, log a message (programming error) and skip insertion log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(), "parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash()) @@ -1203,8 +1208,8 @@ type insertStats struct { startTime mclock.AbsTime } -// statsReportLimit is the time limit during import after which we always print -// out progress. This avoids the user wondering what's going on. +// statsReportLimit is the time limit during import and export after which we +// always print out progress. This avoids the user wondering what's going on. const statsReportLimit = 8 * time.Second // report prints statistics if some number of blocks have been processed diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 687209bfae4a..e452d6936730 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -1146,8 +1146,7 @@ func TestEIP155Transition(t *testing.T) { return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key) } ) - switch i { - case 0: + if i == 0 { tx, err = basicTx(types.NewEIP155Signer(big.NewInt(2))) if err != nil { t.Fatal(err) diff --git a/core/bloombits/generator.go b/core/bloombits/generator.go index 540085450d74..ae07481ada50 100644 --- a/core/bloombits/generator.go +++ b/core/bloombits/generator.go @@ -22,16 +22,22 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) -// errSectionOutOfBounds is returned if the user tried to add more bloom filters -// to the batch than available space, or if tries to retrieve above the capacity, -var errSectionOutOfBounds = errors.New("section out of bounds") +var ( + // errSectionOutOfBounds is returned if the user tried to add more bloom filters + // to the batch than available space, or if tries to retrieve above the capacity. + errSectionOutOfBounds = errors.New("section out of bounds") + + // errBloomBitOutOfBounds is returned if the user tried to retrieve specified + // bit bloom above the capacity. + errBloomBitOutOfBounds = errors.New("bloom bit out of bounds") +) // Generator takes a number of bloom filters and generates the rotated bloom bits // to be used for batched filtering. type Generator struct { blooms [types.BloomBitLength][]byte // Rotated blooms for per-bit matching sections uint // Number of sections to batch together - nextBit uint // Next bit to set when adding a bloom + nextSec uint // Next section to set when adding a bloom } // NewGenerator creates a rotated bloom generator that can iteratively fill a @@ -51,15 +57,15 @@ func NewGenerator(sections uint) (*Generator, error) { // in memory accordingly. 
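To make the nextBit to nextSec rename above easier to follow: the generator keeps one bit vector per bloom bit, and each incoming bloom contributes a single bit, at its section's position, to every such vector. A toy version of that rotation with an 8-bit "bloom" instead of the real 2048-bit one (names and sizes are illustrative):

package main

import "fmt"

const bloomBits = 8 // the real types.BloomBitLength is 2048

// rotate flips the layout: input is one bloom per section, output is one
// bit vector per bloom bit, indexed by section (MSB-first within each byte),
// mirroring the byteIndex/bitMask arithmetic in AddBloom below.
func rotate(blooms []uint8, sections uint) [bloomBits][]byte {
	var out [bloomBits][]byte
	for i := range out {
		out[i] = make([]byte, (sections+7)/8)
	}
	for sec := uint(0); sec < sections; sec++ {
		byteIndex := sec / 8
		bitMask := byte(1) << byte(7-sec%8)
		for bit := 0; bit < bloomBits; bit++ {
			if blooms[sec]&(1<<uint(bit)) != 0 {
				out[bit][byteIndex] |= bitMask
			}
		}
	}
	return out
}

func main() {
	// Section 0 has bits 0 and 3 set (9), section 2 has bit 3 set (8).
	vectors := rotate([]uint8{9, 0, 8}, 3)
	fmt.Printf("bit 3 over sections: %08b\n", vectors[3][0]) // 10100000
}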
func (b *Generator) AddBloom(index uint, bloom types.Bloom) error { // Make sure we're not adding more bloom filters than our capacity - if b.nextBit >= b.sections { + if b.nextSec >= b.sections { return errSectionOutOfBounds } - if b.nextBit != index { + if b.nextSec != index { return errors.New("bloom filter with unexpected index") } // Rotate the bloom and insert into our collection - byteIndex := b.nextBit / 8 - bitMask := byte(1) << byte(7-b.nextBit%8) + byteIndex := b.nextSec / 8 + bitMask := byte(1) << byte(7-b.nextSec%8) for i := 0; i < types.BloomBitLength; i++ { bloomByteIndex := types.BloomByteLength - 1 - i/8 @@ -69,7 +75,7 @@ func (b *Generator) AddBloom(index uint, bloom types.Bloom) error { b.blooms[i][byteIndex] |= bitMask } } - b.nextBit++ + b.nextSec++ return nil } @@ -77,11 +83,11 @@ func (b *Generator) AddBloom(index uint, bloom types.Bloom) error { // Bitset returns the bit vector belonging to the given bit index after all // blooms have been added. func (b *Generator) Bitset(idx uint) ([]byte, error) { - if b.nextBit != b.sections { + if b.nextSec != b.sections { return nil, errors.New("bloom not fully generated yet") } - if idx >= b.sections { - return nil, errSectionOutOfBounds + if idx >= types.BloomBitLength { + return nil, errBloomBitOutOfBounds } return b.blooms[idx], nil } diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go index 8d78adb75975..3ec0d5ae94e5 100644 --- a/core/bloombits/matcher.go +++ b/core/bloombits/matcher.go @@ -59,7 +59,7 @@ type partialMatches struct { // It can also have the actual results set to be used as a delivery data struct. // // The contest and error fields are used by the light client to terminate matching -// early if an error is enountered on some path of the pipeline. +// early if an error is encountered on some path of the pipeline. type Retrieval struct { Bit uint Sections []uint64 @@ -218,7 +218,7 @@ func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uin // run creates a daisy-chain of sub-matchers, one for the address set and one // for each topic set, each sub-matcher receiving a section only if the previous // ones have all found a potential match in one of the blocks of the section, -// then binary AND-ing its own matches and forwaring the result to the next one. +// then binary AND-ing its own matches and forwarding the result to the next one. // // The method starts feeding the section indexes into the first sub-matcher on a // new goroutine and returns a sink channel receiving the results. @@ -543,7 +543,7 @@ func (s *MatcherSession) Error() error { } // AllocateRetrieval assigns a bloom bit index to a client process that can either -// immediately reuest and fetch the section contents assigned to this bit or wait +// immediately request and fetch the section contents assigned to this bit or wait // a little while for more sections to be requested. func (s *MatcherSession) AllocateRetrieval() (uint, bool) { fetcher := make(chan uint) @@ -599,8 +599,8 @@ func (s *MatcherSession) DeliverSections(bit uint, sections []uint64, bitsets [] } } -// Multiplex polls the matcher session for rerieval tasks and multiplexes it into -// the reuested retrieval queue to be serviced together with other sessions. +// Multiplex polls the matcher session for retrieval tasks and multiplexes it into +// the requested retrieval queue to be serviced together with other sessions. // // This method will block for the lifetime of the session. 
Even after termination // of the session, any request in-flight need to be responded to! Empty responses diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go index 7a5f78ef3a51..91143e525e7f 100644 --- a/core/bloombits/matcher_test.go +++ b/core/bloombits/matcher_test.go @@ -156,7 +156,7 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in // Track the number of retrieval requests made var requested uint32 - // Start the matching session for the filter and the retriver goroutines + // Start the matching session for the filter and the retriever goroutines quit := make(chan struct{}) matches := make(chan uint64, 16) diff --git a/core/chain_indexer.go b/core/chain_indexer.go index 0b927116d066..11a7c96fa058 100644 --- a/core/chain_indexer.go +++ b/core/chain_indexer.go @@ -17,6 +17,7 @@ package core import ( + "context" "encoding/binary" "fmt" "sync" @@ -37,11 +38,11 @@ import ( type ChainIndexerBackend interface { // Reset initiates the processing of a new chain segment, potentially terminating // any partially completed operations (in case of a reorg). - Reset(section uint64, prevHead common.Hash) error + Reset(ctx context.Context, section uint64, prevHead common.Hash) error // Process crunches through the next header in the chain segment. The caller // will ensure a sequential order of headers. - Process(header *types.Header) + Process(ctx context.Context, header *types.Header) error // Commit finalizes the section metadata and stores it into the database. Commit() error @@ -71,9 +72,11 @@ type ChainIndexer struct { backend ChainIndexerBackend // Background processor generating the index data content children []*ChainIndexer // Child indexers to cascade chain updates to - active uint32 // Flag whether the event loop was started - update chan struct{} // Notification channel that headers should be processed - quit chan chan error // Quit channel to tear down running goroutines + active uint32 // Flag whether the event loop was started + update chan struct{} // Notification channel that headers should be processed + quit chan chan error // Quit channel to tear down running goroutines + ctx context.Context + ctxCancel func() sectionSize uint64 // Number of blocks in a single chain segment to process confirmsReq uint64 // Number of confirmations before processing a completed segment @@ -105,6 +108,8 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken } // Initialize database dependent fields and start the updater c.loadValidSections() + c.ctx, c.ctxCancel = context.WithCancel(context.Background()) + go c.updateLoop() return c @@ -138,6 +143,8 @@ func (c *ChainIndexer) Start(chain ChainIndexerChain) { func (c *ChainIndexer) Close() error { var errs []error + c.ctxCancel() + // Tear down the primary update loop errc := make(chan error) c.quit <- errc @@ -297,6 +304,12 @@ func (c *ChainIndexer) updateLoop() { c.lock.Unlock() newHead, err := c.processSection(section, oldHead) if err != nil { + select { + case <-c.ctx.Done(): + <-c.quit <- nil + return + default: + } c.log.Error("Section processing failed", "error", err) } c.lock.Lock() @@ -344,7 +357,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com // Reset and partial processing - if err := c.backend.Reset(section, lastHead); err != nil { + if err := c.backend.Reset(c.ctx, section, lastHead); err != nil { c.setValidSections(0) return common.Hash{}, err } @@ -360,11 +373,12 @@ func (c *ChainIndexer) processSection(section 
uint64, lastHead common.Hash) (com } else if header.ParentHash != lastHead { return common.Hash{}, fmt.Errorf("chain reorged during section processing") } - c.backend.Process(header) + if err := c.backend.Process(c.ctx, header); err != nil { + return common.Hash{}, err + } lastHead = header.Hash() } if err := c.backend.Commit(); err != nil { - c.log.Error("Section commit failed", "error", err) return common.Hash{}, err } return lastHead, nil diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go index 550caf5567c8..a029dec62658 100644 --- a/core/chain_indexer_test.go +++ b/core/chain_indexer_test.go @@ -17,6 +17,7 @@ package core import ( + "context" "fmt" "math/big" "math/rand" @@ -210,13 +211,13 @@ func (b *testChainIndexBackend) reorg(headNum uint64) uint64 { return b.stored * b.indexer.sectionSize } -func (b *testChainIndexBackend) Reset(section uint64, prevHead common.Hash) error { +func (b *testChainIndexBackend) Reset(ctx context.Context, section uint64, prevHead common.Hash) error { b.section = section b.headerCnt = 0 return nil } -func (b *testChainIndexBackend) Process(header *types.Header) { +func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Header) error { b.headerCnt++ if b.headerCnt > b.indexer.sectionSize { b.t.Error("Processing too many headers") @@ -227,6 +228,7 @@ func (b *testChainIndexBackend) Process(header *types.Header) { b.t.Fatal("Unexpected call to Process") case b.processCh <- header.Number.Uint64(): } + return nil } func (b *testChainIndexBackend) Commit() error { diff --git a/core/events.go b/core/events.go index 8d200f2a29fc..710bdb589485 100644 --- a/core/events.go +++ b/core/events.go @@ -29,9 +29,6 @@ type PendingLogsEvent struct { Logs []*types.Log } -// PendingStateEvent is posted pre mining and notifies of pending state changes. -type PendingStateEvent struct{} - // NewMinedBlockEvent is posted when a block has been imported. type NewMinedBlockEvent struct{ Block *types.Block } diff --git a/core/evm.go b/core/evm.go index 596ea95fb7ff..d303c40a4074 100644 --- a/core/evm.go +++ b/core/evm.go @@ -84,7 +84,7 @@ func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash } } -// CanTransfer checks wether there are enough funds in the address' account to make a transfer. +// CanTransfer checks whether there are enough funds in the address' account to make a transfer. // This does not take the necessary gas in to account to make the transfer valid. 
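With Reset and Process now receiving the indexer's context, a backend can abort long section work cleanly instead of logging spurious errors during shutdown. A hedged sketch of what a cancellation-aware Process might look like; this is only the shape of such a backend, not the actual bloombits implementation.

package main

import (
	"context"
	"errors"
	"fmt"
)

// processHeader stands in for a ChainIndexerBackend.Process implementation:
// perform one unit of indexing work, but bail out promptly on shutdown.
func processHeader(ctx context.Context, number uint64) error {
	select {
	case <-ctx.Done():
		return errors.New("indexer shutting down")
	default:
	}
	_ = number // ... index the header here ...
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	fmt.Println(processHeader(ctx, 1)) // <nil>
	cancel()
	fmt.Println(processHeader(ctx, 2)) // indexer shutting down
}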
func CanTransfer(db vm.StateDB, addr common.Address, amount *big.Int) bool { return db.GetBalance(addr).Cmp(amount) >= 0 diff --git a/core/headerchain.go b/core/headerchain.go index da6716d679fa..2bbec28bf3d5 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -208,7 +208,7 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) // Do a sanity check that the provided chain is actually ordered and linked for i := 1; i < len(chain); i++ { if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != chain[i-1].Hash() { - // Chain broke ancestry, log a messge (programming error) and skip insertion + // Chain broke ancestry, log a message (programming error) and skip insertion log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(), "parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", chain[i-1].Hash()) diff --git a/core/state_transition.go b/core/state_transition.go index 5654cd01eaae..fda081b7d113 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -35,7 +35,7 @@ var ( The State Transitioning Model A state transition is a change made when a transaction is applied to the current world state -The state transitioning model does all all the necessary work to work out a valid new state root. +The state transitioning model does all the necessary work to work out a valid new state root. 1) Nonce handling 2) Pre pay gas @@ -178,8 +178,8 @@ func (st *StateTransition) preCheck() error { } // TransitionDb will transition the state by applying the current message and -// returning the result including the the used gas. It returns an error if it -// failed. An error indicates a consensus issue. +// returning the result including the used gas. It returns an error if failed. +// An error indicates a consensus issue. func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bool, err error) { if err = st.preCheck(); err != nil { return diff --git a/core/tx_cacher.go b/core/tx_cacher.go index 6d989c83d98c..bcaa5ead3813 100644 --- a/core/tx_cacher.go +++ b/core/tx_cacher.go @@ -22,7 +22,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) -// senderCacher is a concurrent tranaction sender recoverer anc cacher. +// senderCacher is a concurrent transaction sender recoverer anc cacher. var senderCacher = newTxSenderCacher(runtime.NumCPU()) // txSenderCacherRequest is a request for recovering transaction senders with a @@ -45,7 +45,7 @@ type txSenderCacher struct { } // newTxSenderCacher creates a new transaction sender background cacher and starts -// as many procesing goroutines as allowed by the GOMAXPROCS on construction. +// as many processing goroutines as allowed by the GOMAXPROCS on construction. func newTxSenderCacher(threads int) *txSenderCacher { cacher := &txSenderCacher{ tasks: make(chan *txSenderCacherRequest, threads), diff --git a/core/tx_list.go b/core/tx_list.go index 287dda4c33bf..57abc5148643 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -436,7 +436,7 @@ func (l *txPricedList) Removed() { } // Cap finds all the transactions below the given price threshold, drops them -// from the priced list and returs them for further removal from the entire pool. +// from the priced list and returns them for further removal from the entire pool. 
func (l *txPricedList) Cap(threshold *big.Int, local *accountSet) types.Transactions { drop := make(types.Transactions, 0, 128) // Remote underpriced transactions to drop save := make(types.Transactions, 0, 64) // Local underpriced transactions to keep diff --git a/core/tx_pool.go b/core/tx_pool.go index 9c958e3b6fae..46ae2759bca2 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -123,14 +123,15 @@ type blockChain interface { // TxPoolConfig are the configuration parameters of the transaction pool. type TxPoolConfig struct { - NoLocals bool // Whether local transaction handling should be disabled - Journal string // Journal of local transactions to survive node restarts - Rejournal time.Duration // Time interval to regenerate the local transaction journal + Locals []common.Address // Addresses that should be treated by default as local + NoLocals bool // Whether local transaction handling should be disabled + Journal string // Journal of local transactions to survive node restarts + Rejournal time.Duration // Time interval to regenerate the local transaction journal PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) - AccountSlots uint64 // Minimum number of executable transaction slots guaranteed per account + AccountSlots uint64 // Number of executable transaction slots guaranteed per account GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts @@ -231,6 +232,10 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block gasPrice: new(big.Int).SetUint64(config.PriceLimit), } pool.locals = newAccountSet(pool.signer) + for _, addr := range config.Locals { + log.Info("Setting new local account", "address", addr) + pool.locals.add(addr) + } pool.priced = newTxPricedList(pool.all) pool.reset(nil, chain.CurrentBlock().Header()) @@ -534,6 +539,14 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) { return pending, nil } +// Locals retrieves the accounts currently considered local by the pool. +func (pool *TxPool) Locals() []common.Address { + pool.mu.Lock() + defer pool.mu.Unlock() + + return pool.locals.flatten() +} + // local retrieves all currently known local transactions, groupped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. 
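With the new Locals field above, accounts can be pinned as local at pool construction time rather than only after they first send a transaction. A configuration-only sketch follows; the addresses are placeholders and the surrounding chain and pool setup (which NewTxPool needs) is omitted.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Start from the defaults and pin two accounts as local; their transactions
	// are journalled and exempt from the usual eviction heuristics.
	config := core.DefaultTxPoolConfig
	config.Locals = []common.Address{
		common.HexToAddress("0x0000000000000000000000000000000000000001"), // placeholder
		common.HexToAddress("0x0000000000000000000000000000000000000002"), // placeholder
	}
	fmt.Println(len(config.Locals), "preset local accounts")
	// core.NewTxPool(config, chainConfig, blockchain) would then seed its
	// local set from this list, as shown in the hunk above.
}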
@@ -665,7 +678,10 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { } // Mark local addresses and journal local transactions if local { - pool.locals.add(from) + if !pool.locals.contains(from) { + log.Info("Setting new local account", "address", from) + pool.locals.add(from) + } } pool.journalTx(from, tx) @@ -1041,7 +1057,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { } if queued > pool.config.GlobalQueue { // Sort all accounts with queued transactions by heartbeat - addresses := make(addresssByHeartbeat, 0, len(pool.queue)) + addresses := make(addressesByHeartbeat, 0, len(pool.queue)) for addr := range pool.queue { if !pool.locals.contains(addr) { // don't drop locals addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) @@ -1127,17 +1143,18 @@ type addressByHeartbeat struct { heartbeat time.Time } -type addresssByHeartbeat []addressByHeartbeat +type addressesByHeartbeat []addressByHeartbeat -func (a addresssByHeartbeat) Len() int { return len(a) } -func (a addresssByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } -func (a addresssByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a addressesByHeartbeat) Len() int { return len(a) } +func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } +func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // accountSet is simply a set of addresses to check for existence, and a signer // capable of deriving addresses from transactions. type accountSet struct { accounts map[common.Address]struct{} signer types.Signer + cache *[]common.Address } // newAccountSet creates a new address set with an associated signer for sender @@ -1167,6 +1184,20 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool { // add inserts a new address into the set to track. func (as *accountSet) add(addr common.Address) { as.accounts[addr] = struct{}{} + as.cache = nil +} + +// flatten returns the list of addresses within this set, also caching it for later +// reuse. The returned slice should not be changed! +func (as *accountSet) flatten() []common.Address { + if as.cache == nil { + accounts := make([]common.Address, 0, len(as.accounts)) + for account := range as.accounts { + accounts = append(accounts, account) + } + as.cache = &accounts + } + return *as.cache } // txLookup is used internally by TxPool to track transactions while allowing lookup without diff --git a/core/types/transaction.go b/core/types/transaction.go index b824a77f61dd..9c6e77be982b 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -119,7 +119,7 @@ func isProtectedV(V *big.Int) bool { v := V.Uint64() return v != 27 && v != 28 } - // anything not 27 or 28 are considered unprotected + // anything not 27 or 28 is considered protected return true } @@ -266,9 +266,9 @@ func (s Transactions) GetRlp(i int) []byte { return enc } -// TxDifference returns a new set t which is the difference between a to b. -func TxDifference(a, b Transactions) (keep Transactions) { - keep = make(Transactions, 0, len(a)) +// TxDifference returns a new set which is the difference between a and b. 
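The cache field added to accountSet above is a simple memoised-slice pattern: any mutation clears the cache, and flatten rebuilds it on demand and returns the shared, read-only slice. The same idea in isolation, with plain strings instead of addresses:

package main

import "fmt"

// stringSet mirrors accountSet's caching trick.
type stringSet struct {
	items map[string]struct{}
	cache *[]string
}

func newStringSet() *stringSet { return &stringSet{items: make(map[string]struct{})} }

func (s *stringSet) add(v string) {
	s.items[v] = struct{}{}
	s.cache = nil // any mutation invalidates the flattened view
}

// flatten returns the cached slice, rebuilding it only after a mutation.
// Callers must treat the result as read-only, exactly as the pool does.
func (s *stringSet) flatten() []string {
	if s.cache == nil {
		flat := make([]string, 0, len(s.items))
		for v := range s.items {
			flat = append(flat, v)
		}
		s.cache = &flat
	}
	return *s.cache
}

func main() {
	s := newStringSet()
	s.add("a")
	s.add("b")
	fmt.Println(len(s.flatten()), len(s.flatten())) // 2 2, second call reuses the cache
}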
+func TxDifference(a, b Transactions) Transactions { + keep := make(Transactions, 0, len(a)) remove := make(map[common.Hash]struct{}) for _, tx := range b { diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 5cb1cda60bdb..df4ce54db251 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -121,7 +121,7 @@ func (c *sha256hash) Run(input []byte) ([]byte, error) { return h[:], nil } -// RIPMED160 implemented as a native contract. +// RIPEMD160 implemented as a native contract. type ripemd160hash struct{} // RequiredGas returns the gas required to execute the pre-compiled contract. diff --git a/core/vm/errors.go b/core/vm/errors.go index ea33f13f3357..7f88f324ea13 100644 --- a/core/vm/errors.go +++ b/core/vm/errors.go @@ -26,4 +26,5 @@ var ( ErrTraceLimitReached = errors.New("the number of logs reached the specified limit") ErrInsufficientBalance = errors.New("insufficient balance for transfer") ErrContractAddressCollision = errors.New("contract address collision") + ErrNoCompatibleInterpreter = errors.New("no compatible interpreter") ) diff --git a/core/vm/evm.go b/core/vm/evm.go index 86b4c0ed7bba..8e916fff4e2b 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -51,7 +51,20 @@ func run(evm *EVM, contract *Contract, input []byte) ([]byte, error) { return RunPrecompiledContract(p, input, contract) } } - return evm.interpreter.Run(contract, input) + for _, interpreter := range evm.interpreters { + if interpreter.CanRun(contract.Code) { + if evm.interpreter != interpreter { + // Ensure that the interpreter pointer is set back + // to its current value upon return. + defer func(i Interpreter) { + evm.interpreter = i + }(evm.interpreter) + evm.interpreter = interpreter + } + return interpreter.Run(contract, input) + } + } + return nil, ErrNoCompatibleInterpreter } // Context provides the EVM with auxiliary information. Once provided @@ -103,7 +116,8 @@ type EVM struct { vmConfig Config // global (to this context) ethereum virtual machine // used throughout the execution of the tx. - interpreter *Interpreter + interpreters []Interpreter + interpreter Interpreter // abort is used to abort the EVM calling operations // NOTE: must be set atomically abort int32 @@ -119,14 +133,17 @@ type EVM struct { // only ever be used *once*. func NewEVM(ctx Context, statedb StateDB, chainConfig *params.ChainConfig, vmConfig Config) *EVM { evm := &EVM{ - Context: ctx, - StateDB: statedb, - vmConfig: vmConfig, - chainConfig: chainConfig, - chainRules: chainConfig.Rules(ctx.BlockNumber), + Context: ctx, + StateDB: statedb, + vmConfig: vmConfig, + chainConfig: chainConfig, + chainRules: chainConfig.Rules(ctx.BlockNumber), + interpreters: make([]Interpreter, 1), } - evm.interpreter = NewInterpreter(evm, vmConfig) + evm.interpreters[0] = NewEVMInterpreter(evm, vmConfig) + evm.interpreter = evm.interpreters[0] + return evm } @@ -136,6 +153,11 @@ func (evm *EVM) Cancel() { atomic.StoreInt32(&evm.abort, 1) } +// Interpreter returns the current interpreter +func (evm *EVM) Interpreter() Interpreter { + return evm.interpreter +} + // Call executes the contract associated with the addr with the given input as // parameters. 
It also handles any necessary value transfer required and takes // the necessary steps to create accounts and reverses the state in case of an @@ -164,7 +186,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas precompiles = PrecompiledContractsByzantium } if precompiles[addr] == nil && evm.ChainConfig().IsEIP158(evm.BlockNumber) && value.Sign() == 0 { - // Calling a non existing account, don't do antything, but ping the tracer + // Calling a non existing account, don't do anything, but ping the tracer if evm.vmConfig.Debug && evm.depth == 0 { evm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, gas, value) evm.vmConfig.Tracer.CaptureEnd(ret, 0, 0, nil) @@ -293,9 +315,9 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte // Make sure the readonly is only set if we aren't in readonly yet // this makes also sure that the readonly flag isn't removed for // child calls. - if !evm.interpreter.readOnly { - evm.interpreter.readOnly = true - defer func() { evm.interpreter.readOnly = false }() + if !evm.interpreter.IsReadOnly() { + evm.interpreter.SetReadOnly(true) + defer func() { evm.interpreter.SetReadOnly(false) }() } var ( @@ -321,9 +343,8 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte return ret, contract.Gas, err } -// Create creates a new contract using code as deployment code. -func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { - +// create creates a new contract using code as deployment code. +func (evm *EVM) create(caller ContractRef, code []byte, gas uint64, value *big.Int, address common.Address) ([]byte, common.Address, uint64, error) { // Depth check execution. Fail if we're trying to execute above the // limit. if evm.depth > int(params.CallCreateDepth) { @@ -332,39 +353,38 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I if !evm.CanTransfer(evm.StateDB, caller.Address(), value) { return nil, common.Address{}, gas, ErrInsufficientBalance } - // Ensure there's no existing contract already at the designated address nonce := evm.StateDB.GetNonce(caller.Address()) evm.StateDB.SetNonce(caller.Address(), nonce+1) - contractAddr = crypto.CreateAddress(caller.Address(), nonce) - contractHash := evm.StateDB.GetCodeHash(contractAddr) - if evm.StateDB.GetNonce(contractAddr) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) { + // Ensure there's no existing contract already at the designated address + contractHash := evm.StateDB.GetCodeHash(address) + if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) { return nil, common.Address{}, 0, ErrContractAddressCollision } // Create a new account on the state snapshot := evm.StateDB.Snapshot() - evm.StateDB.CreateAccount(contractAddr) + evm.StateDB.CreateAccount(address) if evm.ChainConfig().IsEIP158(evm.BlockNumber) { - evm.StateDB.SetNonce(contractAddr, 1) + evm.StateDB.SetNonce(address, 1) } - evm.Transfer(evm.StateDB, caller.Address(), contractAddr, value) + evm.Transfer(evm.StateDB, caller.Address(), address, value) // initialise a new contract and set the code that is to be used by the // EVM. The contract is a scoped environment for this execution context // only. 
- contract := NewContract(caller, AccountRef(contractAddr), value, gas) - contract.SetCallCode(&contractAddr, crypto.Keccak256Hash(code), code) + contract := NewContract(caller, AccountRef(address), value, gas) + contract.SetCallCode(&address, crypto.Keccak256Hash(code), code) if evm.vmConfig.NoRecursion && evm.depth > 0 { - return nil, contractAddr, gas, nil + return nil, address, gas, nil } if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureStart(caller.Address(), contractAddr, true, code, gas, value) + evm.vmConfig.Tracer.CaptureStart(caller.Address(), address, true, code, gas, value) } start := time.Now() - ret, err = run(evm, contract, nil) + ret, err := run(evm, contract, nil) // check whether the max code size has been exceeded maxCodeSizeExceeded := evm.ChainConfig().IsEIP158(evm.BlockNumber) && len(ret) > params.MaxCodeSize @@ -375,7 +395,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I if err == nil && !maxCodeSizeExceeded { createDataGas := uint64(len(ret)) * params.CreateDataGas if contract.UseGas(createDataGas) { - evm.StateDB.SetCode(contractAddr, ret) + evm.StateDB.SetCode(address, ret) } else { err = ErrCodeStoreOutOfGas } @@ -397,11 +417,24 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I if evm.vmConfig.Debug && evm.depth == 0 { evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err) } - return ret, contractAddr, contract.Gas, err + return ret, address, contract.Gas, err + +} + +// Create creates a new contract using code as deployment code. +func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { + contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetNonce(caller.Address())) + return evm.create(caller, code, gas, value, contractAddr) +} + +// Create2 creates a new contract using code as deployment code. +// +// The different between Create2 with Create is Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:] +// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at. 
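The Create2 comment above spells out the derivation: keccak256(0xff ++ sender ++ salt ++ keccak256(init_code)), keeping the last 20 bytes. Below is a direct, hedged re-implementation of that formula as stated, independent of the crypto.CreateAddress2 helper the patch calls; the sender, salt and init code in main are placeholder values.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// create2Address follows the formula quoted above:
// keccak256(0xff ++ sender ++ salt ++ keccak256(initCode))[12:].
func create2Address(sender common.Address, salt common.Hash, initCode []byte) common.Address {
	codeHash := crypto.Keccak256(initCode)
	digest := crypto.Keccak256([]byte{0xff}, sender.Bytes(), salt.Bytes(), codeHash)
	return common.BytesToAddress(digest[12:])
}

func main() {
	sender := common.HexToAddress("0x00000000000000000000000000000000deadbeef") // placeholder
	salt := common.BigToHash(common.Big1)
	fmt.Println(create2Address(sender, salt, []byte{0x60, 0x00}).Hex())
}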
+func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { + contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), code) + return evm.create(caller, code, gas, endowment, contractAddr) } // ChainConfig returns the environment's chain configuration func (evm *EVM) ChainConfig() *params.ChainConfig { return evm.chainConfig } - -// Interpreter returns the EVM interpreter -func (evm *EVM) Interpreter() *Interpreter { return evm.interpreter } diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 0764c67a4d15..f9eea319e121 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -241,6 +241,10 @@ func gasExtCodeCopy(gt params.GasTable, evm *EVM, contract *Contract, stack *Sta return gas, nil } +func gasExtCodeHash(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + return gt.ExtcodeHash, nil +} + func gasMLoad(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { var overflow bool gas, err := memoryGasCost(mem, memorySize) @@ -289,6 +293,18 @@ func gasCreate(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, m return gas, nil } +func gasCreate2(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + var overflow bool + gas, err := memoryGasCost(mem, memorySize) + if err != nil { + return 0, err + } + if gas, overflow = math.SafeAdd(gas, params.Create2Gas); overflow { + return 0, errGasUintOverflow + } + return gas, nil +} + func gasBalance(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { return gt.Balance, nil } diff --git a/core/vm/instructions.go b/core/vm/instructions.go index d9fc1d152d93..c8b6efb5a909 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -38,45 +38,45 @@ var ( errMaxCodeSizeExceeded = errors.New("evm: max code size exceeded") ) -func opAdd(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opAdd(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.peek() math.U256(y.Add(x, y)) - evm.interpreter.intPool.put(x) + interpreter.intPool.put(x) return nil, nil } -func opSub(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSub(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.peek() math.U256(y.Sub(x, y)) - evm.interpreter.intPool.put(x) + interpreter.intPool.put(x) return nil, nil } -func opMul(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opMul(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.pop() stack.push(math.U256(x.Mul(x, y))) - evm.interpreter.intPool.put(y) + interpreter.intPool.put(y) return nil, nil } -func opDiv(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opDiv(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.peek() if y.Sign() != 0 { math.U256(y.Div(x, y)) } else { y.SetUint64(0) } - 
evm.interpreter.intPool.put(x) + interpreter.intPool.put(x) return nil, nil } -func opSdiv(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSdiv(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := math.S256(stack.pop()), math.S256(stack.pop()) - res := evm.interpreter.intPool.getZero() + res := interpreter.intPool.getZero() if y.Sign() == 0 || x.Sign() == 0 { stack.push(res) @@ -89,24 +89,24 @@ func opSdiv(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Sta } stack.push(math.U256(res)) } - evm.interpreter.intPool.put(x, y) + interpreter.intPool.put(x, y) return nil, nil } -func opMod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opMod(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.pop() if y.Sign() == 0 { stack.push(x.SetUint64(0)) } else { stack.push(math.U256(x.Mod(x, y))) } - evm.interpreter.intPool.put(y) + interpreter.intPool.put(y) return nil, nil } -func opSmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSmod(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := math.S256(stack.pop()), math.S256(stack.pop()) - res := evm.interpreter.intPool.getZero() + res := interpreter.intPool.getZero() if y.Sign() == 0 { stack.push(res) @@ -119,20 +119,20 @@ func opSmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Sta } stack.push(math.U256(res)) } - evm.interpreter.intPool.put(x, y) + interpreter.intPool.put(x, y) return nil, nil } -func opExp(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opExp(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { base, exponent := stack.pop(), stack.pop() stack.push(math.Exp(base, exponent)) - evm.interpreter.intPool.put(base, exponent) + interpreter.intPool.put(base, exponent) return nil, nil } -func opSignExtend(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSignExtend(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { back := stack.pop() if back.Cmp(big.NewInt(31)) < 0 { bit := uint(back.Uint64()*8 + 7) @@ -148,39 +148,39 @@ func opSignExtend(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stac stack.push(math.U256(num)) } - evm.interpreter.intPool.put(back) + interpreter.intPool.put(back) return nil, nil } -func opNot(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opNot(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x := stack.peek() math.U256(x.Not(x)) return nil, nil } -func opLt(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opLt(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.peek() if x.Cmp(y) < 0 { y.SetUint64(1) } else { y.SetUint64(0) } - evm.interpreter.intPool.put(x) + interpreter.intPool.put(x) return nil, nil } -func opGt(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opGt(pc *uint64, interpreter *EVMInterpreter, contract *Contract, 
memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.peek() if x.Cmp(y) > 0 { y.SetUint64(1) } else { y.SetUint64(0) } - evm.interpreter.intPool.put(x) + interpreter.intPool.put(x) return nil, nil } -func opSlt(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSlt(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.peek() xSign := x.Cmp(tt255) @@ -200,11 +200,11 @@ func opSlt(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stac y.SetUint64(0) } } - evm.interpreter.intPool.put(x) + interpreter.intPool.put(x) return nil, nil } -func opSgt(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSgt(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.peek() xSign := x.Cmp(tt255) @@ -224,22 +224,22 @@ func opSgt(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stac y.SetUint64(0) } } - evm.interpreter.intPool.put(x) + interpreter.intPool.put(x) return nil, nil } -func opEq(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opEq(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.peek() if x.Cmp(y) == 0 { y.SetUint64(1) } else { y.SetUint64(0) } - evm.interpreter.intPool.put(x) + interpreter.intPool.put(x) return nil, nil } -func opIszero(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opIszero(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x := stack.peek() if x.Sign() > 0 { x.SetUint64(0) @@ -249,31 +249,31 @@ func opIszero(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S return nil, nil } -func opAnd(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opAnd(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.pop() stack.push(x.And(x, y)) - evm.interpreter.intPool.put(y) + interpreter.intPool.put(y) return nil, nil } -func opOr(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opOr(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.peek() y.Or(x, y) - evm.interpreter.intPool.put(x) + interpreter.intPool.put(x) return nil, nil } -func opXor(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opXor(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y := stack.pop(), stack.peek() y.Xor(x, y) - evm.interpreter.intPool.put(x) + interpreter.intPool.put(x) return nil, nil } -func opByte(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opByte(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { th, val := stack.pop(), stack.peek() if th.Cmp(common.Big32) < 0 { b := math.Byte(val, 32, int(th.Int64())) @@ -281,11 +281,11 @@ func opByte(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Sta } else { val.SetUint64(0) } - evm.interpreter.intPool.put(th) + 
interpreter.intPool.put(th) return nil, nil } -func opAddmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opAddmod(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y, z := stack.pop(), stack.pop(), stack.pop() if z.Cmp(bigZero) > 0 { x.Add(x, y) @@ -294,11 +294,11 @@ func opAddmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S } else { stack.push(x.SetUint64(0)) } - evm.interpreter.intPool.put(y, z) + interpreter.intPool.put(y, z) return nil, nil } -func opMulmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opMulmod(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { x, y, z := stack.pop(), stack.pop(), stack.pop() if z.Cmp(bigZero) > 0 { x.Mul(x, y) @@ -307,17 +307,17 @@ func opMulmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S } else { stack.push(x.SetUint64(0)) } - evm.interpreter.intPool.put(y, z) + interpreter.intPool.put(y, z) return nil, nil } // opSHL implements Shift Left // The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2, // and pushes on the stack arg2 shifted to the left by arg1 number of bits. -func opSHL(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSHL(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards shift, value := math.U256(stack.pop()), math.U256(stack.peek()) - defer evm.interpreter.intPool.put(shift) // First operand back into the pool + defer interpreter.intPool.put(shift) // First operand back into the pool if shift.Cmp(common.Big256) >= 0 { value.SetUint64(0) @@ -332,10 +332,10 @@ func opSHL(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stac // opSHR implements Logical Shift Right // The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2, // and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill. -func opSHR(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSHR(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards shift, value := math.U256(stack.pop()), math.U256(stack.peek()) - defer evm.interpreter.intPool.put(shift) // First operand back into the pool + defer interpreter.intPool.put(shift) // First operand back into the pool if shift.Cmp(common.Big256) >= 0 { value.SetUint64(0) @@ -350,10 +350,10 @@ func opSHR(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stac // opSAR implements Arithmetic Shift Right // The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2, // and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension. 
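The three shift handlers (SHL, SHR, SAR) differ only in how the sign bit is treated. The following standalone sketch of the EIP-145 semantics they implement uses math/big directly; it is an illustration only, not part of the patch, and all names in it are invented:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        mod := new(big.Int).Lsh(big.NewInt(1), 256) // 2^256, the EVM word modulus

        // The unsigned word 2^256-16 reads as -16 under two's complement.
        word := new(big.Int).Sub(mod, big.NewInt(16))

        shr := new(big.Int).Rsh(word, 4)            // SHR: zero fill -> 2^252 - 1
        sar := new(big.Int).Rsh(big.NewInt(-16), 4) // SAR: sign extension, -16>>4 == -1
        sarWord := new(big.Int).Mod(sar, mod)       // as an unsigned word: 2^256 - 1 (all ones)

        fmt.Println(shr, sarWord)
    }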
-func opSAR(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSAR(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { // Note, S256 returns (potentially) a new bigint, so we're popping, not peeking this one shift, value := math.U256(stack.pop()), math.S256(stack.pop()) - defer evm.interpreter.intPool.put(shift) // First operand back into the pool + defer interpreter.intPool.put(shift) // First operand back into the pool if shift.Cmp(common.Big256) >= 0 { if value.Sign() > 0 { @@ -371,57 +371,58 @@ func opSAR(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stac return nil, nil } -func opSha3(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSha3(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { offset, size := stack.pop(), stack.pop() data := memory.Get(offset.Int64(), size.Int64()) hash := crypto.Keccak256(data) + evm := interpreter.evm if evm.vmConfig.EnablePreimageRecording { evm.StateDB.AddPreimage(common.BytesToHash(hash), data) } - stack.push(evm.interpreter.intPool.get().SetBytes(hash)) + stack.push(interpreter.intPool.get().SetBytes(hash)) - evm.interpreter.intPool.put(offset, size) + interpreter.intPool.put(offset, size) return nil, nil } -func opAddress(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opAddress(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { stack.push(contract.Address().Big()) return nil, nil } -func opBalance(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opBalance(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { slot := stack.peek() - slot.Set(evm.StateDB.GetBalance(common.BigToAddress(slot))) + slot.Set(interpreter.evm.StateDB.GetBalance(common.BigToAddress(slot))) return nil, nil } -func opOrigin(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(evm.Origin.Big()) +func opOrigin(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(interpreter.evm.Origin.Big()) return nil, nil } -func opCaller(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opCaller(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { stack.push(contract.Caller().Big()) return nil, nil } -func opCallValue(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(evm.interpreter.intPool.get().Set(contract.value)) +func opCallValue(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(interpreter.intPool.get().Set(contract.value)) return nil, nil } -func opCallDataLoad(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(evm.interpreter.intPool.get().SetBytes(getDataBig(contract.Input, stack.pop(), big32))) +func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(interpreter.intPool.get().SetBytes(getDataBig(contract.Input, stack.pop(), big32))) return nil, nil } -func opCallDataSize(pc 
*uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(evm.interpreter.intPool.get().SetInt64(int64(len(contract.Input)))) +func opCallDataSize(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(interpreter.intPool.get().SetInt64(int64(len(contract.Input)))) return nil, nil } -func opCallDataCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { var ( memOffset = stack.pop() dataOffset = stack.pop() @@ -429,48 +430,48 @@ func opCallDataCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, st ) memory.Set(memOffset.Uint64(), length.Uint64(), getDataBig(contract.Input, dataOffset, length)) - evm.interpreter.intPool.put(memOffset, dataOffset, length) + interpreter.intPool.put(memOffset, dataOffset, length) return nil, nil } -func opReturnDataSize(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(evm.interpreter.intPool.get().SetUint64(uint64(len(evm.interpreter.returnData)))) +func opReturnDataSize(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(interpreter.intPool.get().SetUint64(uint64(len(interpreter.returnData)))) return nil, nil } -func opReturnDataCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { var ( memOffset = stack.pop() dataOffset = stack.pop() length = stack.pop() - end = evm.interpreter.intPool.get().Add(dataOffset, length) + end = interpreter.intPool.get().Add(dataOffset, length) ) - defer evm.interpreter.intPool.put(memOffset, dataOffset, length, end) + defer interpreter.intPool.put(memOffset, dataOffset, length, end) - if end.BitLen() > 64 || uint64(len(evm.interpreter.returnData)) < end.Uint64() { + if end.BitLen() > 64 || uint64(len(interpreter.returnData)) < end.Uint64() { return nil, errReturnDataOutOfBounds } - memory.Set(memOffset.Uint64(), length.Uint64(), evm.interpreter.returnData[dataOffset.Uint64():end.Uint64()]) + memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[dataOffset.Uint64():end.Uint64()]) return nil, nil } -func opExtCodeSize(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { slot := stack.peek() - slot.SetUint64(uint64(evm.StateDB.GetCodeSize(common.BigToAddress(slot)))) + slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(common.BigToAddress(slot)))) return nil, nil } -func opCodeSize(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - l := evm.interpreter.intPool.get().SetInt64(int64(len(contract.Code))) +func opCodeSize(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + l := interpreter.intPool.get().SetInt64(int64(len(contract.Code))) stack.push(l) return nil, nil } -func opCodeCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack 
*Stack) ([]byte, error) { var ( memOffset = stack.pop() codeOffset = stack.pop() @@ -479,114 +480,146 @@ func opCodeCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack codeCopy := getDataBig(contract.Code, codeOffset, length) memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) - evm.interpreter.intPool.put(memOffset, codeOffset, length) + interpreter.intPool.put(memOffset, codeOffset, length) return nil, nil } -func opExtCodeCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { var ( addr = common.BigToAddress(stack.pop()) memOffset = stack.pop() codeOffset = stack.pop() length = stack.pop() ) - codeCopy := getDataBig(evm.StateDB.GetCode(addr), codeOffset, length) + codeCopy := getDataBig(interpreter.evm.StateDB.GetCode(addr), codeOffset, length) memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) - evm.interpreter.intPool.put(memOffset, codeOffset, length) + interpreter.intPool.put(memOffset, codeOffset, length) return nil, nil } -func opGasprice(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(evm.interpreter.intPool.get().Set(evm.GasPrice)) +// opExtCodeHash returns the code hash of a specified account. +// The function is called in several cases; all of them can be delegated to the +// `state.GetCodeHash` function to ensure correctness. +// (1) Caller tries to get the code hash of a normal contract account; state +// should return the corresponding code hash and set it as the result. +// +// (2) Caller tries to get the code hash of a non-existent account; state should +// return common.Hash{} and zero will be set as the result. +// +// (3) Caller tries to get the code hash for an account without contract code; +// state should return emptyCodeHash(0xc5d246...) as the result. +// +// (4) Caller tries to get the code hash of a precompiled account; the result +// should be zero or emptyCodeHash. +// +// It is worth noting that in order to avoid unnecessary account creation and +// cleanup, all precompile accounts on mainnet have been sent 1 wei, so the return +// value here should be emptyCodeHash. +// If a precompile account has not been sent any funds on a private or +// customized chain, the return value will be zero. +// +// (5) Caller tries to get the code hash for an account which is marked as suicided +// in the current transaction; the code hash of this account should be returned. +// +// (6) Caller tries to get the code hash for an account which is marked as deleted; +// this account should be regarded as a non-existent account and zero should be returned.
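As a compact cross-check of the six cases above, here is a hedged sketch written against the exported vm.StateDB interface; the helper name, package name and upstream import paths are assumptions of this illustration, not part of the patch:

    package sketch

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/vm"
    )

    // ExtCodeHashCases shows how the documented cases reduce to plain state queries.
    func ExtCodeHashCases(db vm.StateDB, addr common.Address) common.Hash {
        switch {
        case !db.Exist(addr):
            // cases (2) and (6): non-existent or deleted account -> zero hash
            return common.Hash{}
        case len(db.GetCode(addr)) == 0:
            // cases (3) and (4): existing account without code (an EOA or a
            // funded precompile) -> emptyCodeHash, i.e. Keccak256(nil)
            return db.GetCodeHash(addr)
        default:
            // cases (1) and (5): contract account, even one suicided in the
            // current transaction -> hash of its deployed code
            return db.GetCodeHash(addr)
        }
    }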
+func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + slot := stack.peek() + slot.SetBytes(interpreter.evm.StateDB.GetCodeHash(common.BigToAddress(slot)).Bytes()) return nil, nil } -func opBlockhash(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opGasprice(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(interpreter.intPool.get().Set(interpreter.evm.GasPrice)) + return nil, nil +} + +func opBlockhash(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { num := stack.pop() - n := evm.interpreter.intPool.get().Sub(evm.BlockNumber, common.Big257) - if num.Cmp(n) > 0 && num.Cmp(evm.BlockNumber) < 0 { - stack.push(evm.GetHash(num.Uint64()).Big()) + n := interpreter.intPool.get().Sub(interpreter.evm.BlockNumber, common.Big257) + if num.Cmp(n) > 0 && num.Cmp(interpreter.evm.BlockNumber) < 0 { + stack.push(interpreter.evm.GetHash(num.Uint64()).Big()) } else { - stack.push(evm.interpreter.intPool.getZero()) + stack.push(interpreter.intPool.getZero()) } - evm.interpreter.intPool.put(num, n) + interpreter.intPool.put(num, n) return nil, nil } -func opCoinbase(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(evm.Coinbase.Big()) +func opCoinbase(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(interpreter.evm.Coinbase.Big()) return nil, nil } -func opTimestamp(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(math.U256(evm.interpreter.intPool.get().Set(evm.Time))) +func opTimestamp(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(math.U256(interpreter.intPool.get().Set(interpreter.evm.Time))) return nil, nil } -func opNumber(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(math.U256(evm.interpreter.intPool.get().Set(evm.BlockNumber))) +func opNumber(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(math.U256(interpreter.intPool.get().Set(interpreter.evm.BlockNumber))) return nil, nil } -func opDifficulty(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(math.U256(evm.interpreter.intPool.get().Set(evm.Difficulty))) +func opDifficulty(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(math.U256(interpreter.intPool.get().Set(interpreter.evm.Difficulty))) return nil, nil } -func opGasLimit(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(math.U256(evm.interpreter.intPool.get().SetUint64(evm.GasLimit))) +func opGasLimit(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(math.U256(interpreter.intPool.get().SetUint64(interpreter.evm.GasLimit))) return nil, nil } -func opPop(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - evm.interpreter.intPool.put(stack.pop()) +func opPop(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + 
interpreter.intPool.put(stack.pop()) return nil, nil } -func opMload(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opMload(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { offset := stack.pop() - val := evm.interpreter.intPool.get().SetBytes(memory.Get(offset.Int64(), 32)) + val := interpreter.intPool.get().SetBytes(memory.Get(offset.Int64(), 32)) stack.push(val) - evm.interpreter.intPool.put(offset) + interpreter.intPool.put(offset) return nil, nil } -func opMstore(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opMstore(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { // pop value of the stack mStart, val := stack.pop(), stack.pop() memory.Set32(mStart.Uint64(), val) - evm.interpreter.intPool.put(mStart, val) + interpreter.intPool.put(mStart, val) return nil, nil } -func opMstore8(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opMstore8(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { off, val := stack.pop().Int64(), stack.pop().Int64() memory.store[off] = byte(val & 0xff) return nil, nil } -func opSload(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSload(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { loc := stack.peek() - val := evm.StateDB.GetState(contract.Address(), common.BigToHash(loc)) + val := interpreter.evm.StateDB.GetState(contract.Address(), common.BigToHash(loc)) loc.SetBytes(val.Bytes()) return nil, nil } -func opSstore(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opSstore(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { loc := common.BigToHash(stack.pop()) val := stack.pop() - evm.StateDB.SetState(contract.Address(), loc, common.BigToHash(val)) + interpreter.evm.StateDB.SetState(contract.Address(), loc, common.BigToHash(val)) - evm.interpreter.intPool.put(val) + interpreter.intPool.put(val) return nil, nil } -func opJump(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opJump(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { pos := stack.pop() if !contract.jumpdests.has(contract.CodeHash, contract.Code, pos) { nop := contract.GetOp(pos.Uint64()) @@ -594,11 +627,11 @@ func opJump(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Sta } *pc = pos.Uint64() - evm.interpreter.intPool.put(pos) + interpreter.intPool.put(pos) return nil, nil } -func opJumpi(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opJumpi(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { pos, cond := stack.pop(), stack.pop() if cond.Sign() != 0 { if !contract.jumpdests.has(contract.CodeHash, contract.Code, pos) { @@ -610,55 +643,83 @@ func opJumpi(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *St *pc++ } - evm.interpreter.intPool.put(pos, cond) + interpreter.intPool.put(pos, cond) return nil, nil } -func opJumpdest(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opJumpdest(pc 
*uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { return nil, nil } -func opPc(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(evm.interpreter.intPool.get().SetUint64(*pc)) +func opPc(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(interpreter.intPool.get().SetUint64(*pc)) return nil, nil } -func opMsize(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(evm.interpreter.intPool.get().SetInt64(int64(memory.Len()))) +func opMsize(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(interpreter.intPool.get().SetInt64(int64(memory.Len()))) return nil, nil } -func opGas(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.push(evm.interpreter.intPool.get().SetUint64(contract.Gas)) +func opGas(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.push(interpreter.intPool.get().SetUint64(contract.Gas)) return nil, nil } -func opCreate(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opCreate(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { var ( value = stack.pop() offset, size = stack.pop(), stack.pop() input = memory.Get(offset.Int64(), size.Int64()) gas = contract.Gas ) - if evm.ChainConfig().IsEIP150(evm.BlockNumber) { + if interpreter.evm.ChainConfig().IsEIP150(interpreter.evm.BlockNumber) { gas -= gas / 64 } contract.UseGas(gas) - res, addr, returnGas, suberr := evm.Create(contract, input, gas, value) + res, addr, returnGas, suberr := interpreter.evm.Create(contract, input, gas, value) // Push item on the stack based on the returned error. If the ruleset is // homestead we must check for CodeStoreOutOfGasError (homestead only // rule) and treat as an error, if the ruleset is frontier we must // ignore this error and pretend the operation was successful. - if evm.ChainConfig().IsHomestead(evm.BlockNumber) && suberr == ErrCodeStoreOutOfGas { - stack.push(evm.interpreter.intPool.getZero()) + if interpreter.evm.ChainConfig().IsHomestead(interpreter.evm.BlockNumber) && suberr == ErrCodeStoreOutOfGas { + stack.push(interpreter.intPool.getZero()) } else if suberr != nil && suberr != ErrCodeStoreOutOfGas { - stack.push(evm.interpreter.intPool.getZero()) + stack.push(interpreter.intPool.getZero()) + } else { + stack.push(addr.Big()) + } + contract.Gas += returnGas + interpreter.intPool.put(value, offset, size) + + if suberr == errExecutionReverted { + return res, nil + } + return nil, nil +} + +func opCreate2(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + var ( + endowment = stack.pop() + offset, size = stack.pop(), stack.pop() + salt = stack.pop() + input = memory.Get(offset.Int64(), size.Int64()) + gas = contract.Gas + ) + + // Apply EIP150 + gas -= gas / 64 + contract.UseGas(gas) + res, addr, returnGas, suberr := interpreter.evm.Create2(contract, input, gas, endowment, salt) + // Push item on the stack based on the returned error. 
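The opCreate2 handler delegates address derivation to evm.Create2, which in turn uses crypto.CreateAddress2, added near the end of this patch. A hedged sketch of the EIP-1014 formula involved, with an illustrative helper name that is not part of the codebase:

    package sketch

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/crypto"
    )

    // create2Address spells out Keccak256(0xff ++ caller ++ salt ++ Keccak256(initCode))[12:],
    // the same formula crypto.CreateAddress2 implements.
    func create2Address(caller common.Address, salt [32]byte, initCode []byte) common.Address {
        preimage := append([]byte{0xff}, caller.Bytes()...)
        preimage = append(preimage, salt[:]...)
        preimage = append(preimage, crypto.Keccak256(initCode)...)
        return common.BytesToAddress(crypto.Keccak256(preimage)[12:])
    }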
+ if suberr != nil { + stack.push(interpreter.intPool.getZero()) } else { stack.push(addr.Big()) } contract.Gas += returnGas - evm.interpreter.intPool.put(value, offset, size) + interpreter.intPool.put(endowment, offset, size, salt) if suberr == errExecutionReverted { return res, nil @@ -666,10 +727,10 @@ func opCreate(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S return nil, nil } -func opCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - // Pop gas. The actual gas in in evm.callGasTemp. - evm.interpreter.intPool.put(stack.pop()) - gas := evm.callGasTemp +func opCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + // Pop gas. The actual gas is in interpreter.evm.callGasTemp. + interpreter.intPool.put(stack.pop()) + gas := interpreter.evm.callGasTemp // Pop other call parameters. addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.BigToAddress(addr) @@ -680,29 +741,29 @@ func opCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Sta if value.Sign() != 0 { gas += params.CallStipend } - ret, returnGas, err := evm.Call(contract, toAddr, args, gas, value) + ret, returnGas, err := interpreter.evm.Call(contract, toAddr, args, gas, value) if toAddr == textmsgAddress && err == nil { log.Debug("[Celo]: Adding "+string(ret)+" to evm SMS queue", nil, nil) - evm.SmsQueue = append(evm.SmsQueue, string(ret)) + interpreter.evm.SmsQueue = append(interpreter.evm.SmsQueue, string(ret)) } if err != nil { - stack.push(evm.interpreter.intPool.getZero()) + stack.push(interpreter.intPool.getZero()) } else { - stack.push(evm.interpreter.intPool.get().SetUint64(1)) + stack.push(interpreter.intPool.get().SetUint64(1)) } if err == nil || err == errExecutionReverted { memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } contract.Gas += returnGas - evm.interpreter.intPool.put(addr, value, inOffset, inSize, retOffset, retSize) + interpreter.intPool.put(addr, value, inOffset, inSize, retOffset, retSize) return ret, nil } -func opCallCode(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - // Pop gas. The actual gas is in evm.callGasTemp. - evm.interpreter.intPool.put(stack.pop()) - gas := evm.callGasTemp +func opCallCode(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + // Pop gas. The actual gas is in interpreter.evm.callGasTemp. + interpreter.intPool.put(stack.pop()) + gas := interpreter.evm.callGasTemp // Pop other call parameters.
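opCreate applies the EIP-150 reservation conditionally and opCreate2 applies it unconditionally: the caller keeps one 64th of its remaining gas before the nested frame runs. A one-line sketch of that rule; the helper name is illustrative only:

    package sketch

    // callGasEIP150 is an illustrative helper, not from the codebase: the
    // "all but one 64th" rule applied before gas is handed to a child frame.
    func callGasEIP150(available uint64) uint64 {
        return available - available/64
    }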
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.BigToAddress(addr) @@ -713,96 +774,96 @@ func opCallCode(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack if value.Sign() != 0 { gas += params.CallStipend } - ret, returnGas, err := evm.CallCode(contract, toAddr, args, gas, value) + ret, returnGas, err := interpreter.evm.CallCode(contract, toAddr, args, gas, value) if err != nil { - stack.push(evm.interpreter.intPool.getZero()) + stack.push(interpreter.intPool.getZero()) } else { - stack.push(evm.interpreter.intPool.get().SetUint64(1)) + stack.push(interpreter.intPool.get().SetUint64(1)) } if err == nil || err == errExecutionReverted { memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } contract.Gas += returnGas - evm.interpreter.intPool.put(addr, value, inOffset, inSize, retOffset, retSize) + interpreter.intPool.put(addr, value, inOffset, inSize, retOffset, retSize) return ret, nil } -func opDelegateCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - // Pop gas. The actual gas is in evm.callGasTemp. - evm.interpreter.intPool.put(stack.pop()) - gas := evm.callGasTemp +func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + // Pop gas. The actual gas is in interpreter.evm.callGasTemp. + interpreter.intPool.put(stack.pop()) + gas := interpreter.evm.callGasTemp // Pop other call parameters. addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.BigToAddress(addr) // Get arguments from the memory. args := memory.Get(inOffset.Int64(), inSize.Int64()) - ret, returnGas, err := evm.DelegateCall(contract, toAddr, args, gas) + ret, returnGas, err := interpreter.evm.DelegateCall(contract, toAddr, args, gas) if err != nil { - stack.push(evm.interpreter.intPool.getZero()) + stack.push(interpreter.intPool.getZero()) } else { - stack.push(evm.interpreter.intPool.get().SetUint64(1)) + stack.push(interpreter.intPool.get().SetUint64(1)) } if err == nil || err == errExecutionReverted { memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } contract.Gas += returnGas - evm.interpreter.intPool.put(addr, inOffset, inSize, retOffset, retSize) + interpreter.intPool.put(addr, inOffset, inSize, retOffset, retSize) return ret, nil } -func opStaticCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - // Pop gas. The actual gas is in evm.callGasTemp. - evm.interpreter.intPool.put(stack.pop()) - gas := evm.callGasTemp +func opStaticCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + // Pop gas. The actual gas is in interpreter.evm.callGasTemp. + interpreter.intPool.put(stack.pop()) + gas := interpreter.evm.callGasTemp // Pop other call parameters. addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.BigToAddress(addr) // Get arguments from the memory. 
args := memory.Get(inOffset.Int64(), inSize.Int64()) - ret, returnGas, err := evm.StaticCall(contract, toAddr, args, gas) + ret, returnGas, err := interpreter.evm.StaticCall(contract, toAddr, args, gas) if err != nil { - stack.push(evm.interpreter.intPool.getZero()) + stack.push(interpreter.intPool.getZero()) } else { - stack.push(evm.interpreter.intPool.get().SetUint64(1)) + stack.push(interpreter.intPool.get().SetUint64(1)) } if err == nil || err == errExecutionReverted { memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } contract.Gas += returnGas - evm.interpreter.intPool.put(addr, inOffset, inSize, retOffset, retSize) + interpreter.intPool.put(addr, inOffset, inSize, retOffset, retSize) return ret, nil } -func opReturn(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opReturn(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { offset, size := stack.pop(), stack.pop() ret := memory.GetPtr(offset.Int64(), size.Int64()) - evm.interpreter.intPool.put(offset, size) + interpreter.intPool.put(offset, size) return ret, nil } -func opRevert(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opRevert(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { offset, size := stack.pop(), stack.pop() ret := memory.GetPtr(offset.Int64(), size.Int64()) - evm.interpreter.intPool.put(offset, size) + interpreter.intPool.put(offset, size) return ret, nil } -func opStop(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { +func opStop(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { return nil, nil } -func opSuicide(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - balance := evm.StateDB.GetBalance(contract.Address()) - evm.StateDB.AddBalance(common.BigToAddress(stack.pop()), balance) +func opSuicide(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + balance := interpreter.evm.StateDB.GetBalance(contract.Address()) + interpreter.evm.StateDB.AddBalance(common.BigToAddress(stack.pop()), balance) - evm.StateDB.Suicide(contract.Address()) + interpreter.evm.StateDB.Suicide(contract.Address()) return nil, nil } @@ -810,7 +871,7 @@ func opSuicide(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack * // make log instruction function func makeLog(size int) executionFunc { - return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + return func(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { topics := make([]common.Hash, size) mStart, mSize := stack.pop(), stack.pop() for i := 0; i < size; i++ { @@ -818,23 +879,23 @@ func makeLog(size int) executionFunc { } d := memory.Get(mStart.Int64(), mSize.Int64()) - evm.StateDB.AddLog(&types.Log{ + interpreter.evm.StateDB.AddLog(&types.Log{ Address: contract.Address(), Topics: topics, Data: d, // This is a non-consensus field, but assigned here because // core/state doesn't know the current block number. 
- BlockNumber: evm.BlockNumber.Uint64(), + BlockNumber: interpreter.evm.BlockNumber.Uint64(), }) - evm.interpreter.intPool.put(mStart, mSize) + interpreter.intPool.put(mStart, mSize) return nil, nil } } // make push instruction function func makePush(size uint64, pushByteSize int) executionFunc { - return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + return func(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { codeLen := len(contract.Code) startMin := codeLen @@ -847,7 +908,7 @@ func makePush(size uint64, pushByteSize int) executionFunc { endMin = startMin + pushByteSize } - integer := evm.interpreter.intPool.get() + integer := interpreter.intPool.get() stack.push(integer.SetBytes(common.RightPadBytes(contract.Code[startMin:endMin], pushByteSize))) *pc += size @@ -857,8 +918,8 @@ func makePush(size uint64, pushByteSize int) executionFunc { // make dup instruction function func makeDup(size int64) executionFunc { - return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - stack.dup(evm.interpreter.intPool, int(size)) + return func(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + stack.dup(interpreter.intPool, int(size)) return nil, nil } } @@ -867,7 +928,7 @@ func makeDup(size int64) executionFunc { func makeSwap(size int64) executionFunc { // switch n + 1 otherwise n would be swapped with n size++ - return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { + return func(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { stack.swap(int(size)) return nil, nil } diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 1643da967973..48caadf1f16b 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -30,20 +30,23 @@ type twoOperandTest struct { expected string } -func testTwoOperandOp(t *testing.T, tests []twoOperandTest, opFn func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error)) { +func testTwoOperandOp(t *testing.T, tests []twoOperandTest, opFn func(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error)) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) - stack = newstack() - pc = uint64(0) + env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + stack = newstack() + pc = uint64(0) + evmInterpreter = NewEVMInterpreter(env, env.vmConfig) ) - env.interpreter.intPool = poolOfIntPools.get() + + env.interpreter = evmInterpreter + evmInterpreter.intPool = poolOfIntPools.get() for i, test := range tests { x := new(big.Int).SetBytes(common.Hex2Bytes(test.x)) shift := new(big.Int).SetBytes(common.Hex2Bytes(test.y)) expected := new(big.Int).SetBytes(common.Hex2Bytes(test.expected)) stack.push(x) stack.push(shift) - opFn(&pc, env, nil, nil, stack) + opFn(&pc, evmInterpreter, nil, nil, stack) actual := stack.pop() if actual.Cmp(expected) != 0 { t.Errorf("Testcase %d, expected %v, got %v", i, expected, actual) @@ -51,13 +54,13 @@ func testTwoOperandOp(t *testing.T, tests []twoOperandTest, opFn func(pc *uint64 // Check pool usage // 1.pool is not allowed to contain anything on the stack // 2.pool is not allowed to contain the same pointers twice - if env.interpreter.intPool.pool.len() > 0 { + if 
evmInterpreter.intPool.pool.len() > 0 { poolvals := make(map[*big.Int]struct{}) poolvals[actual] = struct{}{} - for env.interpreter.intPool.pool.len() > 0 { - key := env.interpreter.intPool.get() + for evmInterpreter.intPool.pool.len() > 0 { + key := evmInterpreter.intPool.get() if _, exist := poolvals[key]; exist { t.Errorf("Testcase %d, pool contains double-entry", i) } @@ -65,15 +68,18 @@ func testTwoOperandOp(t *testing.T, tests []twoOperandTest, opFn func(pc *uint64 } } } - poolOfIntPools.put(env.interpreter.intPool) + poolOfIntPools.put(evmInterpreter.intPool) } func TestByteOp(t *testing.T) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) - stack = newstack() + env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + stack = newstack() + evmInterpreter = NewEVMInterpreter(env, env.vmConfig) ) - env.interpreter.intPool = poolOfIntPools.get() + + env.interpreter = evmInterpreter + evmInterpreter.intPool = poolOfIntPools.get() tests := []struct { v string th uint64 @@ -94,13 +100,13 @@ func TestByteOp(t *testing.T) { th := new(big.Int).SetUint64(test.th) stack.push(val) stack.push(th) - opByte(&pc, env, nil, nil, stack) + opByte(&pc, evmInterpreter, nil, nil, stack) actual := stack.pop() if actual.Cmp(test.expected) != 0 { t.Fatalf("Expected [%v] %v:th byte to be %v, was %v.", test.v, test.th, test.expected, actual) } } - poolOfIntPools.put(env.interpreter.intPool) + poolOfIntPools.put(evmInterpreter.intPool) } func TestSHL(t *testing.T) { @@ -200,11 +206,15 @@ func TestSLT(t *testing.T) { testTwoOperandOp(t, tests, opSlt) } -func opBenchmark(bench *testing.B, op func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error), args ...string) { +func opBenchmark(bench *testing.B, op func(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error), args ...string) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) - stack = newstack() + env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + stack = newstack() + evmInterpreter = NewEVMInterpreter(env, env.vmConfig) ) + + env.interpreter = evmInterpreter + evmInterpreter.intPool = poolOfIntPools.get() // convert args byteArgs := make([][]byte, len(args)) for i, arg := range args { @@ -217,9 +227,10 @@ func opBenchmark(bench *testing.B, op func(pc *uint64, evm *EVM, contract *Contr a := new(big.Int).SetBytes(arg) stack.push(a) } - op(&pc, env, nil, nil, stack) + op(&pc, evmInterpreter, nil, nil, stack) stack.pop() } + poolOfIntPools.put(evmInterpreter.intPool) } func BenchmarkOpAdd64(b *testing.B) { @@ -432,33 +443,40 @@ func BenchmarkOpIsZero(b *testing.B) { func TestOpMstore(t *testing.T) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) - stack = newstack() - mem = NewMemory() + env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + stack = newstack() + mem = NewMemory() + evmInterpreter = NewEVMInterpreter(env, env.vmConfig) ) - env.interpreter.intPool = poolOfIntPools.get() + + env.interpreter = evmInterpreter + evmInterpreter.intPool = poolOfIntPools.get() mem.Resize(64) pc := uint64(0) v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700" stack.pushN(new(big.Int).SetBytes(common.Hex2Bytes(v)), big.NewInt(0)) - opMstore(&pc, env, nil, mem, stack) + opMstore(&pc, evmInterpreter, nil, mem, stack) if got := common.Bytes2Hex(mem.Get(0, 32)); got != v { t.Fatalf("Mstore fail, got %v, expected %v", got, v) } stack.pushN(big.NewInt(0x1), big.NewInt(0)) 
- opMstore(&pc, env, nil, mem, stack) + opMstore(&pc, evmInterpreter, nil, mem, stack) if common.Bytes2Hex(mem.Get(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" { t.Fatalf("Mstore failed to overwrite previous value") } - poolOfIntPools.put(env.interpreter.intPool) + poolOfIntPools.put(evmInterpreter.intPool) } func BenchmarkOpMstore(bench *testing.B) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) - stack = newstack() - mem = NewMemory() + env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + stack = newstack() + mem = NewMemory() + evmInterpreter = NewEVMInterpreter(env, env.vmConfig) ) + + env.interpreter = evmInterpreter + evmInterpreter.intPool = poolOfIntPools.get() mem.Resize(64) pc := uint64(0) memStart := big.NewInt(0) @@ -467,6 +485,7 @@ func BenchmarkOpMstore(bench *testing.B) { bench.ResetTimer() for i := 0; i < bench.N; i++ { stack.pushN(value, memStart) - opMstore(&pc, env, nil, mem, stack) + opMstore(&pc, evmInterpreter, nil, mem, stack) } + poolOfIntPools.put(evmInterpreter.intPool) } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 0d6038cbbf70..1e9202424a20 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -45,7 +45,30 @@ type Config struct { // passed environment to query external sources for state information. // The Interpreter will run the byte code VM based on the passed // configuration. -type Interpreter struct { +type Interpreter interface { + // Run loops and evaluates the contract's code with the given input data and returns + // the return byte-slice and an error if one occurred. + Run(contract *Contract, input []byte) ([]byte, error) + // CanRun tells if the contract, passed as an argument, can be + // run by the current interpreter. This is meant so that the + // caller can do something like: + // + // ```golang + // for _, interpreter := range interpreters { + // if interpreter.CanRun(contract.code) { + // interpreter.Run(contract.code, input) + // } + // } + // ``` + CanRun([]byte) bool + // IsReadOnly reports if the interpreter is in read only mode. + IsReadOnly() bool + // SetReadOnly sets (or unsets) read only mode in the interpreter. + SetReadOnly(bool) +} + +// EVMInterpreter represents an EVM interpreter +type EVMInterpreter struct { evm *EVM cfg Config gasTable params.GasTable @@ -55,8 +78,8 @@ type Interpreter struct { returnData []byte // Last CALL's return data for subsequent reuse } -// NewInterpreter returns a new instance of the Interpreter. -func NewInterpreter(evm *EVM, cfg Config) *Interpreter { +// NewEVMInterpreter returns a new instance of the Interpreter. +func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { // We use the STOP instruction whether to see // the jump table was initialised. If it was not // we'll set the default jump table. 
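The bulk of this patch rewrites every opcode handler to take an *EVMInterpreter instead of an *EVM, so handlers reach chain state through the interpreter's back-pointer. A hedged sketch of what a hypothetical extra handler inside core/vm would look like under the new signature; the opcode and its name are invented for illustration and are not part of the patch:

    // opSelfBalanceSketch is a hypothetical handler shown only to illustrate
    // the new calling convention; it is not added by this change.
    func opSelfBalanceSketch(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
        // State access now goes through interpreter.evm, while the shared
        // big.Int pool lives on the interpreter itself.
        stack.push(interpreter.intPool.get().Set(
            interpreter.evm.StateDB.GetBalance(contract.Address())))
        return nil, nil
    }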
@@ -73,14 +96,14 @@ func NewInterpreter(evm *EVM, cfg Config) *Interpreter { } } - return &Interpreter{ + return &EVMInterpreter{ evm: evm, cfg: cfg, gasTable: evm.ChainConfig().GasTable(evm.BlockNumber), } } -func (in *Interpreter) enforceRestrictions(op OpCode, operation operation, stack *Stack) error { +func (in *EVMInterpreter) enforceRestrictions(op OpCode, operation operation, stack *Stack) error { if in.evm.chainRules.IsByzantium { if in.readOnly { // If the interpreter is operating in readonly mode, make sure no @@ -102,7 +125,7 @@ func (in *Interpreter) enforceRestrictions(op OpCode, operation operation, stack // It's important to note that any errors returned by the interpreter should be // considered a revert-and-consume-all-gas operation except for // errExecutionReverted which means revert-and-keep-gas-left. -func (in *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err error) { +func (in *EVMInterpreter) Run(contract *Contract, input []byte) (ret []byte, err error) { if in.intPool == nil { in.intPool = poolOfIntPools.get() defer func() { @@ -209,7 +232,7 @@ func (in *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err er } // execute the operation - res, err := operation.execute(&pc, in.evm, contract, mem, stack) + res, err := operation.execute(&pc, in, contract, mem, stack) // verifyPool is a build flag. Pool verification makes sure the integrity // of the integer pool by comparing values to a default value. if verifyPool { @@ -234,3 +257,19 @@ func (in *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err er } return nil, nil } + +// CanRun tells if the contract, passed as an argument, can be +// run by the current interpreter. +func (in *EVMInterpreter) CanRun(code []byte) bool { + return true +} + +// IsReadOnly reports if the interpreter is in read only mode. +func (in *EVMInterpreter) IsReadOnly() bool { + return in.readOnly +} + +// SetReadOnly sets (or unsets) read only mode in the interpreter. 
+func (in *EVMInterpreter) SetReadOnly(ro bool) { + in.readOnly = ro +} diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 111a9b798178..deedf70cdb7d 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -24,7 +24,7 @@ import ( ) type ( - executionFunc func(pc *uint64, env *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) + executionFunc func(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) gasFunc func(params.GasTable, *EVM, *Contract, *Stack, *Memory, uint64) (uint64, error) // last parameter is the requested memory size as a uint64 stackValidationFunc func(*Stack) error memorySizeFunc func(*Stack) *big.Int @@ -80,6 +80,21 @@ func newConstantinopleInstructionSet() [256]operation { validateStack: makeStackFunc(2, 1), valid: true, } + instructionSet[EXTCODEHASH] = operation{ + execute: opExtCodeHash, + gasCost: gasExtCodeHash, + validateStack: makeStackFunc(1, 1), + valid: true, + } + instructionSet[CREATE2] = operation{ + execute: opCreate2, + gasCost: gasCreate2, + validateStack: makeStackFunc(4, 1), + memorySize: memoryCreate2, + valid: true, + writes: true, + returns: true, + } return instructionSet } diff --git a/core/vm/memory_table.go b/core/vm/memory_table.go index ab49ebb38bff..8fa6c90ca7d4 100644 --- a/core/vm/memory_table.go +++ b/core/vm/memory_table.go @@ -58,6 +58,10 @@ func memoryCreate(stack *Stack) *big.Int { return calcMemSize(stack.Back(1), stack.Back(2)) } +func memoryCreate2(stack *Stack) *big.Int { + return calcMemSize(stack.Back(1), stack.Back(2)) +} + func memoryCall(stack *Stack) *big.Int { x := calcMemSize(stack.Back(5), stack.Back(6)) y := calcMemSize(stack.Back(3), stack.Back(4)) diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index 6c12c50e5111..4349ffd29525 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -90,6 +90,7 @@ const ( EXTCODECOPY RETURNDATASIZE RETURNDATACOPY + EXTCODEHASH ) // 0x40 range - block operations. @@ -209,6 +210,7 @@ const ( CALLCODE RETURN DELEGATECALL + CREATE2 STATICCALL = 0xfa REVERT = 0xfd @@ -266,6 +268,7 @@ var opCodeToString = map[OpCode]string{ EXTCODECOPY: "EXTCODECOPY", RETURNDATASIZE: "RETURNDATASIZE", RETURNDATACOPY: "RETURNDATACOPY", + EXTCODEHASH: "EXTCODEHASH", // 0x40 range - block operations. BLOCKHASH: "BLOCKHASH", @@ -370,6 +373,7 @@ var opCodeToString = map[OpCode]string{ RETURN: "RETURN", CALLCODE: "CALLCODE", DELEGATECALL: "DELEGATECALL", + CREATE2: "CREATE2", STATICCALL: "STATICCALL", REVERT: "REVERT", SELFDESTRUCT: "SELFDESTRUCT", @@ -433,6 +437,7 @@ var stringToOp = map[string]OpCode{ "EXTCODECOPY": EXTCODECOPY, "RETURNDATASIZE": RETURNDATASIZE, "RETURNDATACOPY": RETURNDATACOPY, + "EXTCODEHASH": EXTCODEHASH, "BLOCKHASH": BLOCKHASH, "COINBASE": COINBASE, "TIMESTAMP": TIMESTAMP, @@ -521,6 +526,7 @@ var stringToOp = map[string]OpCode{ "LOG3": LOG3, "LOG4": LOG4, "CREATE": CREATE, + "CREATE2": CREATE2, "CALL": CALL, "RETURN": RETURN, "CALLCODE": CALLCODE, diff --git a/crypto/bn256/LICENSE b/crypto/bn256/LICENSE new file mode 100644 index 000000000000..634e0cb2c362 --- /dev/null +++ b/crypto/bn256/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2018 Péter Szilágyi. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/crypto/bn256/bn256_fast.go b/crypto/bn256/bn256_fast.go index a8dfa8f67d1d..5c081493b0fb 100644 --- a/crypto/bn256/bn256_fast.go +++ b/crypto/bn256/bn256_fast.go @@ -1,18 +1,6 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . +// Copyright 2018 Péter Szilágyi. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. // +build amd64 arm64 diff --git a/crypto/bn256/bn256_fuzz.go b/crypto/bn256/bn256_fuzz.go index f360b0541fec..6aa142117045 100644 --- a/crypto/bn256/bn256_fuzz.go +++ b/crypto/bn256/bn256_fuzz.go @@ -1,18 +1,6 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. 
If not, see . +// Copyright 2018 Péter Szilágyi. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. // +build gofuzz diff --git a/crypto/bn256/bn256_slow.go b/crypto/bn256/bn256_slow.go index 61373763b89d..47df49d41763 100644 --- a/crypto/bn256/bn256_slow.go +++ b/crypto/bn256/bn256_slow.go @@ -1,18 +1,6 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . +// Copyright 2018 Péter Szilágyi. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. // +build !amd64,!arm64 diff --git a/crypto/bn256/cloudflare/LICENSE b/crypto/bn256/cloudflare/LICENSE new file mode 100644 index 000000000000..6a66aea5eafe --- /dev/null +++ b/crypto/bn256/cloudflare/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/crypto/bn256/cloudflare/gfp_amd64.s b/crypto/bn256/cloudflare/gfp_amd64.s index 3a785d200bcf..bdb4ffb78707 100644 --- a/crypto/bn256/cloudflare/gfp_amd64.s +++ b/crypto/bn256/cloudflare/gfp_amd64.s @@ -110,7 +110,7 @@ TEXT ·gfpMul(SB),0,$160-24 MOVQ b+16(FP), SI // Jump to a slightly different implementation if MULX isn't supported. 
- CMPB runtime·support_bmi2(SB), $0 + CMPB ·hasBMI2(SB), $0 JE nobmi2Mul mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI)) diff --git a/crypto/bn256/cloudflare/gfp_decl.go b/crypto/bn256/cloudflare/gfp_decl.go index 6a8a4fddb611..fdea5c11a5be 100644 --- a/crypto/bn256/cloudflare/gfp_decl.go +++ b/crypto/bn256/cloudflare/gfp_decl.go @@ -5,6 +5,13 @@ package bn256 // This file contains forward declarations for the architecture-specific // assembly implementations of these functions, provided that they exist. +import ( + "golang.org/x/sys/cpu" +) + +//nolint:varcheck +var hasBMI2 = cpu.X86.HasBMI2 + // go:noescape func gfpNeg(c, a *gfP) diff --git a/crypto/bn256/google/bn256.go b/crypto/bn256/google/bn256.go index 5da83e033a98..e0402e51f0e2 100644 --- a/crypto/bn256/google/bn256.go +++ b/crypto/bn256/google/bn256.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package bn256 implements a particular bilinear group at the 128-bit security level. +// Package bn256 implements a particular bilinear group. // // Bilinear groups are the basis of many of the new cryptographic protocols // that have been proposed over the past decade. They consist of a triplet of @@ -14,6 +14,10 @@ // Barreto-Naehrig curve as described in // http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible // with the implementation described in that paper. +// +// (This package previously claimed to operate at a 128-bit security level. +// However, recent improvements in attacks mean that is no longer true. See +// https://moderncrypto.org/mail-archive/curves/2016/000740.html.) package bn256 import ( @@ -50,8 +54,8 @@ func RandomG1(r io.Reader) (*big.Int, *G1, error) { return k, new(G1).ScalarBaseMult(k), nil } -func (g *G1) String() string { - return "bn256.G1" + g.p.String() +func (e *G1) String() string { + return "bn256.G1" + e.p.String() } // CurvePoints returns p's curve points in big integer @@ -98,15 +102,19 @@ func (e *G1) Neg(a *G1) *G1 { } // Marshal converts n to a byte slice. -func (n *G1) Marshal() []byte { - n.p.MakeAffine(nil) - - xBytes := new(big.Int).Mod(n.p.x, P).Bytes() - yBytes := new(big.Int).Mod(n.p.y, P).Bytes() - +func (e *G1) Marshal() []byte { // Each value is a 256-bit number. const numBytes = 256 / 8 + if e.p.IsInfinity() { + return make([]byte, numBytes*2) + } + + e.p.MakeAffine(nil) + + xBytes := new(big.Int).Mod(e.p.x, P).Bytes() + yBytes := new(big.Int).Mod(e.p.y, P).Bytes() + ret := make([]byte, numBytes*2) copy(ret[1*numBytes-len(xBytes):], xBytes) copy(ret[2*numBytes-len(yBytes):], yBytes) @@ -175,8 +183,8 @@ func RandomG2(r io.Reader) (*big.Int, *G2, error) { return k, new(G2).ScalarBaseMult(k), nil } -func (g *G2) String() string { - return "bn256.G2" + g.p.String() +func (e *G2) String() string { + return "bn256.G2" + e.p.String() } // CurvePoints returns the curve points of p which includes the real @@ -216,6 +224,13 @@ func (e *G2) Add(a, b *G2) *G2 { // Marshal converts n into a byte slice. func (n *G2) Marshal() []byte { + // Each value is a 256-bit number. + const numBytes = 256 / 8 + + if n.p.IsInfinity() { + return make([]byte, numBytes*4) + } + n.p.MakeAffine(nil) xxBytes := new(big.Int).Mod(n.p.x.x, P).Bytes() @@ -223,9 +238,6 @@ func (n *G2) Marshal() []byte { yxBytes := new(big.Int).Mod(n.p.y.x, P).Bytes() yyBytes := new(big.Int).Mod(n.p.y.y, P).Bytes() - // Each value is a 256-bit number. 
- const numBytes = 256 / 8 - ret := make([]byte, numBytes*4) copy(ret[1*numBytes-len(xxBytes):], xxBytes) copy(ret[2*numBytes-len(xyBytes):], xyBytes) diff --git a/crypto/bn256/google/curve.go b/crypto/bn256/google/curve.go index 3e679fdc7e8f..819cb81da7ab 100644 --- a/crypto/bn256/google/curve.go +++ b/crypto/bn256/google/curve.go @@ -245,11 +245,19 @@ func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int, pool *bnPool) *curvePoi return c } +// MakeAffine converts c to affine form and returns c. If c is ∞, then it sets +// c to 0 : 1 : 0. func (c *curvePoint) MakeAffine(pool *bnPool) *curvePoint { if words := c.z.Bits(); len(words) == 1 && words[0] == 1 { return c } - + if c.IsInfinity() { + c.x.SetInt64(0) + c.y.SetInt64(1) + c.z.SetInt64(0) + c.t.SetInt64(0) + return c + } zInv := pool.Get().ModInverse(c.z, P) t := pool.Get().Mul(c.y, zInv) t.Mod(t, P) diff --git a/crypto/bn256/google/twist.go b/crypto/bn256/google/twist.go index 1f5a4d9deb9b..43364ff5b7bd 100644 --- a/crypto/bn256/google/twist.go +++ b/crypto/bn256/google/twist.go @@ -225,11 +225,19 @@ func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int, pool *bnPool) *twistPoi return c } +// MakeAffine converts c to affine form and returns c. If c is ∞, then it sets +// c to 0 : 1 : 0. func (c *twistPoint) MakeAffine(pool *bnPool) *twistPoint { if c.z.IsOne() { return c } - + if c.IsInfinity() { + c.x.SetZero() + c.y.SetOne() + c.z.SetZero() + c.t.SetZero() + return c + } zInv := newGFp2(pool).Invert(c.z, pool) t := newGFp2(pool).Mul(c.y, zInv, pool) zInv2 := newGFp2(pool).Square(zInv, pool) diff --git a/crypto/crypto.go b/crypto/crypto.go index 619440e81750..3211957e0ab9 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -76,6 +76,12 @@ func CreateAddress(b common.Address, nonce uint64) common.Address { return common.BytesToAddress(Keccak256(data)[12:]) } +// CreateAddress2 creates an ethereum address given the address bytes, initial +// contract code and a salt. +func CreateAddress2(b common.Address, salt [32]byte, code []byte) common.Address { + return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt[:], Keccak256(code))[12:]) +} + // ToECDSA creates a private key with the given D value. func ToECDSA(d []byte) (*ecdsa.PrivateKey, error) { return toECDSA(d, true) diff --git a/crypto/secp256k1/LICENSE b/crypto/secp256k1/LICENSE new file mode 100644 index 000000000000..f9090e14236b --- /dev/null +++ b/crypto/secp256k1/LICENSE @@ -0,0 +1,31 @@ +Copyright (c) 2010 The Go Authors. All rights reserved. +Copyright (c) 2011 ThePiachu. All rights reserved. +Copyright (c) 2015 Jeffrey Wilcke. All rights reserved. +Copyright (c) 2015 Felix Lange. All rights reserved. +Copyright (c) 2015 Gustav Simonsson. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of the copyright holder. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
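Aside on the CreateAddress2 helper added to crypto/crypto.go above: a minimal usage sketch, assuming only the go-ethereum common and crypto packages as shown in the diff. The deployer address, salt and init code below are made-up illustrative values, not taken from the change.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	deployer := common.HexToAddress("0x0000000000000000000000000000000000001234") // hypothetical deployer
	var salt [32]byte                                                             // all-zero salt, purely illustrative
	code := []byte{0x60, 0x00, 0x60, 0x00}                                        // hypothetical contract init code

	// Per the new helper: address = Keccak256(0xff ++ deployer ++ salt ++ Keccak256(code))[12:]
	addr := crypto.CreateAddress2(deployer, salt, code)
	fmt.Println(addr.Hex())
}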
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/crypto/secp256k1/curve.go b/crypto/secp256k1/curve.go index 6fdf2be6a47d..56be235b363b 100644 --- a/crypto/secp256k1/curve.go +++ b/crypto/secp256k1/curve.go @@ -1,5 +1,6 @@ // Copyright 2010 The Go Authors. All rights reserved. // Copyright 2011 ThePiachu. All rights reserved. +// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -35,8 +36,6 @@ import ( "crypto/elliptic" "math/big" "unsafe" - - "github.com/ethereum/go-ethereum/common/math" ) /* @@ -45,6 +44,27 @@ extern int secp256k1_ext_scalar_mul(const secp256k1_context* ctx, const unsigned */ import "C" +const ( + // number of bits in a big.Word + wordBits = 32 << (uint64(^big.Word(0)) >> 63) + // number of bytes in a big.Word + wordBytes = wordBits / 8 +) + +// readBits encodes the absolute value of bigint as big-endian bytes. Callers +// must ensure that buf has enough space. If buf is too short the result will +// be incomplete. +func readBits(bigint *big.Int, buf []byte) { + i := len(buf) + for _, d := range bigint.Bits() { + for j := 0; j < wordBytes && i > 0; j++ { + i-- + buf[i] = byte(d) + d >>= 8 + } + } +} + // This code is from https://github.com/ThePiachu/GoBit and implements // several Koblitz elliptic curves over prime fields. // @@ -231,8 +251,9 @@ func (BitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, // Do the multiplication in C, updating point. 
point := make([]byte, 64) - math.ReadBits(Bx, point[:32]) - math.ReadBits(By, point[32:]) + readBits(Bx, point[:32]) + readBits(By, point[32:]) + pointPtr := (*C.uchar)(unsafe.Pointer(&point[0])) scalarPtr := (*C.uchar)(unsafe.Pointer(&scalar[0])) res := C.secp256k1_ext_scalar_mul(context, pointPtr, scalarPtr) @@ -264,8 +285,8 @@ func (BitCurve *BitCurve) Marshal(x, y *big.Int) []byte { byteLen := (BitCurve.BitSize + 7) >> 3 ret := make([]byte, 1+2*byteLen) ret[0] = 4 // uncompressed point flag - math.ReadBits(x, ret[1:1+byteLen]) - math.ReadBits(y, ret[1+byteLen:]) + readBits(x, ret[1:1+byteLen]) + readBits(y, ret[1+byteLen:]) return ret } @@ -290,11 +311,11 @@ func init() { // See SEC 2 section 2.7.1 // curve parameters taken from: // http://www.secg.org/collateral/sec2_final.pdf - theCurve.P = math.MustParseBig256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F") - theCurve.N = math.MustParseBig256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141") - theCurve.B = math.MustParseBig256("0x0000000000000000000000000000000000000000000000000000000000000007") - theCurve.Gx = math.MustParseBig256("0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798") - theCurve.Gy = math.MustParseBig256("0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8") + theCurve.P, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 0) + theCurve.N, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 0) + theCurve.B, _ = new(big.Int).SetString("0x0000000000000000000000000000000000000000000000000000000000000007", 0) + theCurve.Gx, _ = new(big.Int).SetString("0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 0) + theCurve.Gy, _ = new(big.Int).SetString("0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 0) theCurve.BitSize = 256 } diff --git a/crypto/secp256k1/ext.h b/crypto/secp256k1/ext.h index 9b043c724e0a..e422fe4b496d 100644 --- a/crypto/secp256k1/ext.h +++ b/crypto/secp256k1/ext.h @@ -1,18 +1,6 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . +// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found in +// the LICENSE file. // secp256k1_context_create_sign_verify creates a context for signing and signature verification. static secp256k1_context* secp256k1_context_create_sign_verify() { diff --git a/crypto/secp256k1/panic_cb.go b/crypto/secp256k1/panic_cb.go index e0e9034ee025..6d59a1d247ea 100644 --- a/crypto/secp256k1/panic_cb.go +++ b/crypto/secp256k1/panic_cb.go @@ -1,18 +1,6 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . +// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found in +// the LICENSE file. package secp256k1 diff --git a/crypto/secp256k1/secp256.go b/crypto/secp256k1/secp256.go index eefbb99ee4f2..843fb1252505 100644 --- a/crypto/secp256k1/secp256.go +++ b/crypto/secp256k1/secp256.go @@ -1,18 +1,6 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . +// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found in +// the LICENSE file. // Package secp256k1 wraps the bitcoin secp256k1 C library. package secp256k1 diff --git a/crypto/secp256k1/secp256_test.go b/crypto/secp256k1/secp256_test.go index b608bcfcf107..ef2a3a3790b4 100644 --- a/crypto/secp256k1/secp256_test.go +++ b/crypto/secp256k1/secp256_test.go @@ -1,18 +1,6 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . +// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found in +// the LICENSE file. 
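Aside on the readBits helper introduced in crypto/secp256k1/curve.go above (which replaces the dependency on common/math): a self-contained sketch of the same fixed-width big-endian encoding. The sample value and the 32-byte buffer size are illustrative; 32 bytes matches how ScalarMult and Marshal use it in the diff.

package main

import (
	"fmt"
	"math/big"
)

const (
	wordBits  = 32 << (uint64(^big.Word(0)) >> 63) // bits per big.Word (32 or 64)
	wordBytes = wordBits / 8                       // bytes per big.Word
)

// readBits writes the absolute value of bigint into buf as big-endian bytes,
// filling from the right; callers must make buf large enough.
func readBits(bigint *big.Int, buf []byte) {
	i := len(buf)
	for _, d := range bigint.Bits() {
		for j := 0; j < wordBytes && i > 0; j++ {
			i--
			buf[i] = byte(d)
			d >>= 8
		}
	}
}

func main() {
	x, _ := new(big.Int).SetString("deadbeef", 16) // illustrative value
	buf := make([]byte, 32)                        // fixed 32-byte field element
	readBits(x, buf)
	fmt.Printf("%x\n", buf) // 28 zero bytes followed by deadbeef
}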
package secp256k1 @@ -22,10 +10,8 @@ import ( "crypto/elliptic" "crypto/rand" "encoding/hex" + "io" "testing" - - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/crypto/randentropy" ) const TestCount = 1000 @@ -36,11 +22,24 @@ func generateKeyPair() (pubkey, privkey []byte) { panic(err) } pubkey = elliptic.Marshal(S256(), key.X, key.Y) - return pubkey, math.PaddedBigBytes(key.D, 32) + + privkey = make([]byte, 32) + blob := key.D.Bytes() + copy(privkey[32-len(blob):], blob) + + return pubkey, privkey +} + +func csprngEntropy(n int) []byte { + buf := make([]byte, n) + if _, err := io.ReadFull(rand.Reader, buf); err != nil { + panic("reading from crypto/rand failed: " + err.Error()) + } + return buf } func randSig() []byte { - sig := randentropy.GetEntropyCSPRNG(65) + sig := csprngEntropy(65) sig[32] &= 0x70 sig[64] %= 4 return sig @@ -63,7 +62,7 @@ func compactSigCheck(t *testing.T, sig []byte) { func TestSignatureValidity(t *testing.T) { pubkey, seckey := generateKeyPair() - msg := randentropy.GetEntropyCSPRNG(32) + msg := csprngEntropy(32) sig, err := Sign(msg, seckey) if err != nil { t.Errorf("signature error: %s", err) @@ -86,7 +85,7 @@ func TestSignatureValidity(t *testing.T) { func TestInvalidRecoveryID(t *testing.T) { _, seckey := generateKeyPair() - msg := randentropy.GetEntropyCSPRNG(32) + msg := csprngEntropy(32) sig, _ := Sign(msg, seckey) sig[64] = 99 _, err := RecoverPubkey(msg, sig) @@ -97,7 +96,7 @@ func TestInvalidRecoveryID(t *testing.T) { func TestSignAndRecover(t *testing.T) { pubkey1, seckey := generateKeyPair() - msg := randentropy.GetEntropyCSPRNG(32) + msg := csprngEntropy(32) sig, err := Sign(msg, seckey) if err != nil { t.Errorf("signature error: %s", err) @@ -148,7 +147,7 @@ func TestRandomMessagesWithRandomKeys(t *testing.T) { func signAndRecoverWithRandomMessages(t *testing.T, keys func() ([]byte, []byte)) { for i := 0; i < TestCount; i++ { pubkey1, seckey := keys() - msg := randentropy.GetEntropyCSPRNG(32) + msg := csprngEntropy(32) sig, err := Sign(msg, seckey) if err != nil { t.Fatalf("signature error: %s", err) @@ -176,7 +175,7 @@ func signAndRecoverWithRandomMessages(t *testing.T, keys func() ([]byte, []byte) func TestRecoveryOfRandomSignature(t *testing.T) { pubkey1, _ := generateKeyPair() - msg := randentropy.GetEntropyCSPRNG(32) + msg := csprngEntropy(32) for i := 0; i < TestCount; i++ { // recovery can sometimes work, but if so should always give wrong pubkey @@ -189,11 +188,11 @@ func TestRecoveryOfRandomSignature(t *testing.T) { func TestRandomMessagesAgainstValidSig(t *testing.T) { pubkey1, seckey := generateKeyPair() - msg := randentropy.GetEntropyCSPRNG(32) + msg := csprngEntropy(32) sig, _ := Sign(msg, seckey) for i := 0; i < TestCount; i++ { - msg = randentropy.GetEntropyCSPRNG(32) + msg = csprngEntropy(32) pubkey2, _ := RecoverPubkey(msg, sig) // recovery can sometimes work, but if so should always give wrong pubkey if bytes.Equal(pubkey1, pubkey2) { @@ -219,7 +218,7 @@ func TestRecoverSanity(t *testing.T) { func BenchmarkSign(b *testing.B) { _, seckey := generateKeyPair() - msg := randentropy.GetEntropyCSPRNG(32) + msg := csprngEntropy(32) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -228,7 +227,7 @@ func BenchmarkSign(b *testing.B) { } func BenchmarkRecover(b *testing.B) { - msg := randentropy.GetEntropyCSPRNG(32) + msg := csprngEntropy(32) _, seckey := generateKeyPair() sig, _ := Sign(msg, seckey) b.ResetTimer() diff --git a/dashboard/assets.go b/dashboard/assets.go index 
521d134a6ab1..f3e7cf981506 100644 --- a/dashboard/assets.go +++ b/dashboard/assets.go @@ -64,6 +64,9 @@ var _indexHtml = []byte(` ::-webkit-scrollbar-thumb { background: #212121; } + ::-webkit-scrollbar-corner { + background: transparent; + } @@ -84,7 +87,7 @@ func indexHtml() (*asset, error) { } info := bindataFileInfo{name: "index.html", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0xd9, 0xa6, 0xeb, 0x32, 0x49, 0x9b, 0xe5, 0x3a, 0xcb, 0x99, 0xd3, 0xb6, 0x69, 0x7f, 0xde, 0x35, 0x9d, 0x5, 0x96, 0x84, 0xc0, 0x14, 0xef, 0xbe, 0x58, 0x10, 0x5e, 0x40, 0xf2, 0x12, 0x97}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x22, 0xc8, 0x3d, 0x86, 0x2f, 0xb4, 0x6a, 0x1f, 0xda, 0xd, 0x54, 0x14, 0xa3, 0x6e, 0x80, 0x56, 0x28, 0xea, 0x44, 0xcf, 0xf5, 0xf2, 0xe, 0xad, 0x19, 0xf5, 0x93, 0xd6, 0x8d, 0x6d, 0x2f, 0x35}} return a, nil } @@ -116,11 +119,11 @@ var _bundleJs = []byte((((((((((`!function(modules) { return __webpack_require__.d(getter, "a", getter), getter; }, __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); - }, __webpack_require__.p = "", __webpack_require__(__webpack_require__.s = 336); + }, __webpack_require__.p = "", __webpack_require__(__webpack_require__.s = 375); }([ function(module, exports, __webpack_require__) { "use strict"; (function(process) { - "production" === process.env.NODE_ENV ? module.exports = __webpack_require__(337) : module.exports = __webpack_require__(338); + "production" === process.env.NODE_ENV ? module.exports = __webpack_require__(376) : module.exports = __webpack_require__(377); }).call(exports, __webpack_require__(2)); }, function(module, exports, __webpack_require__) { (function(process) { @@ -128,8 +131,8 @@ var _bundleJs = []byte((((((((((`!function(modules) { var REACT_ELEMENT_TYPE = "function" == typeof Symbol && Symbol.for && Symbol.for("react.element") || 60103, isValidElement = function(object) { return "object" == typeof object && null !== object && object.$$typeof === REACT_ELEMENT_TYPE; }; - module.exports = __webpack_require__(379)(isValidElement, !0); - } else module.exports = __webpack_require__(380)(); + module.exports = __webpack_require__(418)(isValidElement, !0); + } else module.exports = __webpack_require__(419)(); }).call(exports, __webpack_require__(2)); }, function(module, exports) { function defaultSetTimout() { @@ -289,7 +292,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }), __webpack_require__.d(__webpack_exports__, "o", function() { return parseChildIndex; }); - var __WEBPACK_IMPORTED_MODULE_0_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_0_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_1_lodash_isString__ = __webpack_require__(163), __WEBPACK_IMPORTED_MODULE_1_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isString__), __WEBPACK_IMPORTED_MODULE_2_lodash_isObject__ = __webpack_require__(31), __WEBPACK_IMPORTED_MODULE_2_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_3_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_3_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray__ = __webpack_require__(11), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray___default = 
__webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7__DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_8__PureRender__ = __webpack_require__(5), PRESENTATION_ATTRIBUTES = { + var __WEBPACK_IMPORTED_MODULE_0_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_0_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_1_lodash_isString__ = __webpack_require__(173), __WEBPACK_IMPORTED_MODULE_1_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isString__), __WEBPACK_IMPORTED_MODULE_2_lodash_isObject__ = __webpack_require__(32), __WEBPACK_IMPORTED_MODULE_2_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_3_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_3_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray__ = __webpack_require__(13), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7__DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_8__PureRender__ = __webpack_require__(5), PRESENTATION_ATTRIBUTES = { alignmentBaseline: __WEBPACK_IMPORTED_MODULE_6_prop_types___default.a.string, angle: __WEBPACK_IMPORTED_MODULE_6_prop_types___default.a.number, baselineShift: __WEBPACK_IMPORTED_MODULE_6_prop_types___default.a.string, @@ -502,7 +505,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = !0; - var _assign = __webpack_require__(204), _assign2 = function(obj) { + var _assign = __webpack_require__(222), _assign2 = function(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; @@ -527,7 +530,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { var tag = baseGetTag(value); return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag; } - var baseGetTag = __webpack_require__(41), isObject = __webpack_require__(31), asyncTag = "[object AsyncFunction]", funcTag = "[object Function]", genTag = "[object GeneratorFunction]", proxyTag = "[object Proxy]"; + var baseGetTag = __webpack_require__(41), isObject = __webpack_require__(32), asyncTag = "[object AsyncFunction]", funcTag = "[object Function]", genTag = "[object GeneratorFunction]", proxyTag = "[object Proxy]"; module.exports = isFunction; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; @@ -554,7 +557,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }), __webpack_require__.d(__webpack_exports__, "c", function() { return getLinearRegression; }); - var __WEBPACK_IMPORTED_MODULE_0_lodash_get__ = __webpack_require__(164), __WEBPACK_IMPORTED_MODULE_0_lodash_get___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_get__), __WEBPACK_IMPORTED_MODULE_1_lodash_isArray__ = __webpack_require__(11), __WEBPACK_IMPORTED_MODULE_1_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__ = __webpack_require__(116), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__), __WEBPACK_IMPORTED_MODULE_3_lodash_isNumber__ = __webpack_require__(169), __WEBPACK_IMPORTED_MODULE_3_lodash_isNumber___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isNumber__), __WEBPACK_IMPORTED_MODULE_4_lodash_isString__ = __webpack_require__(163), __WEBPACK_IMPORTED_MODULE_4_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isString__), mathSign = function(value) { + var __WEBPACK_IMPORTED_MODULE_0_lodash_get__ = __webpack_require__(174), __WEBPACK_IMPORTED_MODULE_0_lodash_get___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_get__), __WEBPACK_IMPORTED_MODULE_1_lodash_isArray__ = __webpack_require__(13), __WEBPACK_IMPORTED_MODULE_1_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__ = __webpack_require__(120), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__), __WEBPACK_IMPORTED_MODULE_3_lodash_isNumber__ = __webpack_require__(272), __WEBPACK_IMPORTED_MODULE_3_lodash_isNumber___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isNumber__), __WEBPACK_IMPORTED_MODULE_4_lodash_isString__ = __webpack_require__(173), __WEBPACK_IMPORTED_MODULE_4_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isString__), mathSign = function(value) { return 0 === value ? 0 : value > 0 ? 
1 : -1; }, isPercent = function(value) { return __WEBPACK_IMPORTED_MODULE_4_lodash_isString___default()(value) && value.indexOf("%") === value.length - 1; @@ -623,12 +626,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { Object.defineProperty(exports, "__esModule", { value: !0 }), exports.sheetsManager = void 0; - var _keys = __webpack_require__(50), _keys2 = _interopRequireDefault(_keys), _extends2 = __webpack_require__(6), _extends3 = _interopRequireDefault(_extends2), _getPrototypeOf = __webpack_require__(26), _getPrototypeOf2 = _interopRequireDefault(_getPrototypeOf), _classCallCheck2 = __webpack_require__(27), _classCallCheck3 = _interopRequireDefault(_classCallCheck2), _createClass2 = __webpack_require__(28), _createClass3 = _interopRequireDefault(_createClass2), _possibleConstructorReturn2 = __webpack_require__(29), _possibleConstructorReturn3 = _interopRequireDefault(_possibleConstructorReturn2), _inherits2 = __webpack_require__(30), _inherits3 = _interopRequireDefault(_inherits2), _objectWithoutProperties2 = __webpack_require__(7), _objectWithoutProperties3 = _interopRequireDefault(_objectWithoutProperties2), _map = __webpack_require__(401), _map2 = _interopRequireDefault(_map), _minSafeInteger = __webpack_require__(417), _minSafeInteger2 = _interopRequireDefault(_minSafeInteger), _react = __webpack_require__(0), _react2 = _interopRequireDefault(_react), _propTypes = __webpack_require__(1), _propTypes2 = _interopRequireDefault(_propTypes), _warning = __webpack_require__(12), _warning2 = _interopRequireDefault(_warning), _hoistNonReactStatics = __webpack_require__(151), _hoistNonReactStatics2 = _interopRequireDefault(_hoistNonReactStatics), _getDisplayName = __webpack_require__(226), _getDisplayName2 = _interopRequireDefault(_getDisplayName), _wrapDisplayName = __webpack_require__(75), _wrapDisplayName2 = _interopRequireDefault(_wrapDisplayName), _contextTypes = __webpack_require__(420), _contextTypes2 = _interopRequireDefault(_contextTypes), _jss = __webpack_require__(228), _ns = __webpack_require__(227), ns = function(obj) { + var _keys = __webpack_require__(55), _keys2 = _interopRequireDefault(_keys), _extends2 = __webpack_require__(6), _extends3 = _interopRequireDefault(_extends2), _getPrototypeOf = __webpack_require__(26), _getPrototypeOf2 = _interopRequireDefault(_getPrototypeOf), _classCallCheck2 = __webpack_require__(27), _classCallCheck3 = _interopRequireDefault(_classCallCheck2), _createClass2 = __webpack_require__(28), _createClass3 = _interopRequireDefault(_createClass2), _possibleConstructorReturn2 = __webpack_require__(29), _possibleConstructorReturn3 = _interopRequireDefault(_possibleConstructorReturn2), _inherits2 = __webpack_require__(30), _inherits3 = _interopRequireDefault(_inherits2), _objectWithoutProperties2 = __webpack_require__(7), _objectWithoutProperties3 = _interopRequireDefault(_objectWithoutProperties2), _map = __webpack_require__(440), _map2 = _interopRequireDefault(_map), _minSafeInteger = __webpack_require__(456), _minSafeInteger2 = _interopRequireDefault(_minSafeInteger), _react = __webpack_require__(0), _react2 = _interopRequireDefault(_react), _propTypes = __webpack_require__(1), _propTypes2 = _interopRequireDefault(_propTypes), _warning = __webpack_require__(11), _warning2 = _interopRequireDefault(_warning), _hoistNonReactStatics = __webpack_require__(162), _hoistNonReactStatics2 = _interopRequireDefault(_hoistNonReactStatics), _getDisplayName = __webpack_require__(244), _getDisplayName2 = _interopRequireDefault(_getDisplayName), 
_wrapDisplayName = __webpack_require__(79), _wrapDisplayName2 = _interopRequireDefault(_wrapDisplayName), _contextTypes = __webpack_require__(459), _contextTypes2 = _interopRequireDefault(_contextTypes), _jss = __webpack_require__(246), _ns = __webpack_require__(245), ns = function(obj) { if (obj && obj.__esModule) return obj; var newObj = {}; if (null != obj) for (var key in obj) Object.prototype.hasOwnProperty.call(obj, key) && (newObj[key] = obj[key]); return newObj.default = obj, newObj; - }(_ns), _jssPreset = __webpack_require__(442), _jssPreset2 = _interopRequireDefault(_jssPreset), _createMuiTheme = __webpack_require__(150), _createMuiTheme2 = _interopRequireDefault(_createMuiTheme), _themeListener = __webpack_require__(149), _themeListener2 = _interopRequireDefault(_themeListener), _createGenerateClassName = __webpack_require__(455), _createGenerateClassName2 = _interopRequireDefault(_createGenerateClassName), _getStylesCreator = __webpack_require__(456), _getStylesCreator2 = _interopRequireDefault(_getStylesCreator), jss = (0, + }(_ns), _jssPreset = __webpack_require__(481), _jssPreset2 = _interopRequireDefault(_jssPreset), _createMuiTheme = __webpack_require__(161), _createMuiTheme2 = _interopRequireDefault(_createMuiTheme), _themeListener = __webpack_require__(160), _themeListener2 = _interopRequireDefault(_themeListener), _createGenerateClassName = __webpack_require__(494), _createGenerateClassName2 = _interopRequireDefault(_createGenerateClassName), _getStylesCreator = __webpack_require__(495), _getStylesCreator2 = _interopRequireDefault(_getStylesCreator), jss = (0, _jss.create)((0, _jssPreset2.default)()), generateClassName = (0, _createGenerateClassName2.default)(), indexCounter = _minSafeInteger2.default, sheetsManager = exports.sheetsManager = new _map2.default(), noopTheme = {}, defaultTheme = void 0, withStyles = function(stylesOrCreator) { var options = arguments.length > 1 && void 0 !== arguments[1] ? arguments[1] : {}; return function(Component) { @@ -730,10 +733,10 @@ var _bundleJs = []byte((((((((((`!function(modules) { renderedClasses = sheetsManagerTheme.sheet.classes; } classes = classesProp ? (0, _extends3.default)({}, renderedClasses, (0, _keys2.default)(classesProp).reduce(function(accumulator, key) { - return "production" !== process.env.NODE_ENV && (0, _warning2.default)(renderedClasses[key] || _this3.disableStylesGeneration, [ "Material-UI: the key ` + "`") + (`" + key + "` + ("`" + ` provided to the classes property is not implemented in " + (0, + return "production" !== process.env.NODE_ENV && (0, _warning2.default)(renderedClasses[key] || _this3.disableStylesGeneration, [ "Material-UI: the key ` + ("`" + `" + key + "`)) + ("`" + (` provided to the classes property is not implemented in " + (0, _getDisplayName2.default)(Component) + ".", "You can only override one of the following: " + (0, _keys2.default)(renderedClasses).join(",") ].join("\n")), "production" !== process.env.NODE_ENV && (0, - _warning2.default)(!classesProp[key] || "string" == typeof classesProp[key], [ "Material-UI: the key `))) + (("`" + (`" + key + "` + "`")) + (` provided to the classes property is not valid for " + (0, + _warning2.default)(!classesProp[key] || "string" == typeof classesProp[key], [ "Material-UI: the key ` + "`"))) + ((`" + key + "` + ("`" + ` provided to the classes property is not valid for " + (0, _getDisplayName2.default)(Component) + ".", "You need to provide a non empty string instead of: " + classesProp[key] + "." 
].join("\n")), classesProp[key] && (accumulator[key] = renderedClasses[key] + " " + classesProp[key]), accumulator; @@ -761,9 +764,6 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; exports.default = withStyles; }).call(exports, __webpack_require__(2)); -}, function(module, exports) { - var isArray = Array.isArray; - module.exports = isArray; }, function(module, exports, __webpack_require__) { "use strict"; (function(process) { @@ -772,7 +772,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { var len = arguments.length; args = new Array(len > 2 ? len - 2 : 0); for (var key = 2; key < len; key++) args[key - 2] = arguments[key]; - if (void 0 === format) throw new Error("` + ("`" + `warning(condition, format, ...args)`)))) + ((("`" + (` requires a warning message argument"); + if (void 0 === format) throw new Error("`)) + ("`" + (`warning(condition, format, ...args)` + "`")))) + (((` requires a warning message argument"); if (format.length < 10 || /^[s\W]*$/.test(format)) throw new Error("The warning format should be able to uniquely identify this warning. Please, use a more descriptive format than: " + format); if (!condition) { var argIndex = 0, message = "Warning: " + format.replace(/%s/g, function() { @@ -788,7 +788,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = !0; - var _defineProperty = __webpack_require__(142), _defineProperty2 = function(obj) { + var _defineProperty = __webpack_require__(154), _defineProperty2 = function(obj) { return obj && obj.__esModule ? obj : { default: obj }; @@ -801,6 +801,9 @@ var _bundleJs = []byte((((((((((`!function(modules) { writable: !0 }) : obj[key] = value, obj; }; +}, function(module, exports) { + var isArray = Array.isArray; + module.exports = isArray; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function _objectWithoutProperties(obj, keys) { @@ -826,7 +829,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; Layer.propTypes = propTypes, __webpack_exports__.a = Layer; }, function(module, exports, __webpack_require__) { - var global = __webpack_require__(157), core = __webpack_require__(158), hide = __webpack_require__(245), redefine = __webpack_require__(536), ctx = __webpack_require__(539), $export = function(type, name, source) { + var global = __webpack_require__(167), core = __webpack_require__(168), hide = __webpack_require__(266), redefine = __webpack_require__(581), ctx = __webpack_require__(584), $export = function(type, name, source) { var key, own, out, exp, IS_FORCED = type & $export.F, IS_GLOBAL = type & $export.G, IS_STATIC = type & $export.S, IS_PROTO = type & $export.P, IS_BIND = type & $export.B, target = IS_GLOBAL ? global : IS_STATIC ? global[name] || (global[name] = {}) : (global[name] || {}).prototype, exports = IS_GLOBAL ? core : core[name] || (core[name] = {}), expProto = exports.prototype || (exports.prototype = {}); IS_GLOBAL && (source = name); for (key in source) own = !IS_FORCED && target && void 0 !== target[key], out = (own ? 
target : source)[key], @@ -914,8 +917,8 @@ var _bundleJs = []byte((((((((((`!function(modules) { }), __webpack_require__.d(__webpack_exports__, "y", function() { return parseDomainOfCategoryAxis; }); - var __WEBPACK_IMPORTED_MODULE_0_lodash_isEqual__ = __webpack_require__(34), __WEBPACK_IMPORTED_MODULE_0_lodash_isEqual___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isEqual__), __WEBPACK_IMPORTED_MODULE_1_lodash_sortBy__ = __webpack_require__(284), __WEBPACK_IMPORTED_MODULE_1_lodash_sortBy___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_sortBy__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__ = __webpack_require__(116), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__), __WEBPACK_IMPORTED_MODULE_3_lodash_isString__ = __webpack_require__(163), __WEBPACK_IMPORTED_MODULE_3_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isString__), __WEBPACK_IMPORTED_MODULE_4_lodash_max__ = __webpack_require__(700), __WEBPACK_IMPORTED_MODULE_4_lodash_max___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_max__), __WEBPACK_IMPORTED_MODULE_5_lodash_min__ = __webpack_require__(289), __WEBPACK_IMPORTED_MODULE_5_lodash_min___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_lodash_min__), __WEBPACK_IMPORTED_MODULE_6_lodash_isArray__ = __webpack_require__(11), __WEBPACK_IMPORTED_MODULE_6_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_7_lodash_flatMap__ = __webpack_require__(701), __WEBPACK_IMPORTED_MODULE_7_lodash_flatMap___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_7_lodash_flatMap__), __WEBPACK_IMPORTED_MODULE_8_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_8_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_8_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_9_lodash_get__ = __webpack_require__(164), __WEBPACK_IMPORTED_MODULE_9_lodash_get___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_9_lodash_get__), __WEBPACK_IMPORTED_MODULE_10_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_10_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_10_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_11_recharts_scale__ = __webpack_require__(703), __WEBPACK_IMPORTED_MODULE_12_d3_scale__ = (__webpack_require__.n(__WEBPACK_IMPORTED_MODULE_11_recharts_scale__), - __webpack_require__(292)), __WEBPACK_IMPORTED_MODULE_13_d3_shape__ = __webpack_require__(172), __WEBPACK_IMPORTED_MODULE_14__DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_15__cartesian_ReferenceDot__ = __webpack_require__(325), __WEBPACK_IMPORTED_MODULE_16__cartesian_ReferenceLine__ = __webpack_require__(326), __WEBPACK_IMPORTED_MODULE_17__cartesian_ReferenceArea__ = __webpack_require__(327), __WEBPACK_IMPORTED_MODULE_18__cartesian_ErrorBar__ = __webpack_require__(92), __WEBPACK_IMPORTED_MODULE_19__component_Legend__ = __webpack_require__(170), __WEBPACK_IMPORTED_MODULE_20__ReactUtils__ = __webpack_require__(4), _extends = Object.assign || function(target) { + var __WEBPACK_IMPORTED_MODULE_0_lodash_isEqual__ = __webpack_require__(45), __WEBPACK_IMPORTED_MODULE_0_lodash_isEqual___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isEqual__), __WEBPACK_IMPORTED_MODULE_1_lodash_sortBy__ = __webpack_require__(321), __WEBPACK_IMPORTED_MODULE_1_lodash_sortBy___default = 
__webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_sortBy__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__ = __webpack_require__(120), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__), __WEBPACK_IMPORTED_MODULE_3_lodash_isString__ = __webpack_require__(173), __WEBPACK_IMPORTED_MODULE_3_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isString__), __WEBPACK_IMPORTED_MODULE_4_lodash_max__ = __webpack_require__(841), __WEBPACK_IMPORTED_MODULE_4_lodash_max___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_max__), __WEBPACK_IMPORTED_MODULE_5_lodash_min__ = __webpack_require__(328), __WEBPACK_IMPORTED_MODULE_5_lodash_min___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_lodash_min__), __WEBPACK_IMPORTED_MODULE_6_lodash_isArray__ = __webpack_require__(13), __WEBPACK_IMPORTED_MODULE_6_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_7_lodash_flatMap__ = __webpack_require__(842), __WEBPACK_IMPORTED_MODULE_7_lodash_flatMap___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_7_lodash_flatMap__), __WEBPACK_IMPORTED_MODULE_8_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_8_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_8_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_9_lodash_get__ = __webpack_require__(174), __WEBPACK_IMPORTED_MODULE_9_lodash_get___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_9_lodash_get__), __WEBPACK_IMPORTED_MODULE_10_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_10_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_10_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_11_recharts_scale__ = __webpack_require__(844), __WEBPACK_IMPORTED_MODULE_12_d3_scale__ = (__webpack_require__.n(__WEBPACK_IMPORTED_MODULE_11_recharts_scale__), + __webpack_require__(331)), __WEBPACK_IMPORTED_MODULE_13_d3_shape__ = __webpack_require__(182), __WEBPACK_IMPORTED_MODULE_14__DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_15__cartesian_ReferenceDot__ = __webpack_require__(364), __WEBPACK_IMPORTED_MODULE_16__cartesian_ReferenceLine__ = __webpack_require__(365), __WEBPACK_IMPORTED_MODULE_17__cartesian_ReferenceArea__ = __webpack_require__(366), __WEBPACK_IMPORTED_MODULE_18__cartesian_ErrorBar__ = __webpack_require__(95), __WEBPACK_IMPORTED_MODULE_19__component_Legend__ = __webpack_require__(180), __WEBPACK_IMPORTED_MODULE_20__ReactUtils__ = __webpack_require__(4), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -1406,7 +1409,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; }, function(module, exports) { var core = module.exports = { - version: "2.5.3" + version: "2.5.7" }; "number" == typeof __e && (__e = core); }, function(module, __webpack_exports__, __webpack_require__) { @@ -1449,10 +1452,10 @@ var _bundleJs = []byte((((((((((`!function(modules) { __webpack_exports__.a = newInterval; var t0 = new Date(), t1 = new Date(); }, function(module, exports, __webpack_require__) { - var global = __webpack_require__(24), core = __webpack_require__(17), ctx = __webpack_require__(46), hide = __webpack_require__(40), $export = function(type, name, source) { + var global = 
__webpack_require__(24), core = __webpack_require__(17), ctx = __webpack_require__(51), hide = __webpack_require__(39), has = __webpack_require__(54), $export = function(type, name, source) { var key, own, out, IS_FORCED = type & $export.F, IS_GLOBAL = type & $export.G, IS_STATIC = type & $export.S, IS_PROTO = type & $export.P, IS_BIND = type & $export.B, IS_WRAP = type & $export.W, exports = IS_GLOBAL ? core : core[name] || (core[name] = {}), expProto = exports.prototype, target = IS_GLOBAL ? global : IS_STATIC ? global[name] : (global[name] || {}).prototype; IS_GLOBAL && (source = name); - for (key in source) (own = !IS_FORCED && target && void 0 !== target[key]) && key in exports || (out = own ? target[key] : source[key], + for (key in source) (own = !IS_FORCED && target && void 0 !== target[key]) && has(exports, key) || (out = own ? target[key] : source[key], exports[key] = IS_GLOBAL && "function" != typeof target[key] ? source[key] : IS_BIND && own ? ctx(out, global) : IS_WRAP && target[key] == out ? function(C) { var F = function(a, b, c) { if (this instanceof C) { @@ -1482,12 +1485,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { } module.exports = isNil; }, function(module, exports, __webpack_require__) { - var store = __webpack_require__(139)("wks"), uid = __webpack_require__(99), Symbol = __webpack_require__(24).Symbol, USE_SYMBOL = "function" == typeof Symbol; + var store = __webpack_require__(151)("wks"), uid = __webpack_require__(103), Symbol = __webpack_require__(24).Symbol, USE_SYMBOL = "function" == typeof Symbol; (module.exports = function(name) { return store[name] || (store[name] = USE_SYMBOL && Symbol[name] || (USE_SYMBOL ? Symbol : uid)("Symbol." + name)); }).store = store; }, function(module, exports, __webpack_require__) { - var anObject = __webpack_require__(47), IE8_DOM_DEFINE = __webpack_require__(206), toPrimitive = __webpack_require__(133), dP = Object.defineProperty; + var anObject = __webpack_require__(52), IE8_DOM_DEFINE = __webpack_require__(224), toPrimitive = __webpack_require__(145), dP = Object.defineProperty; exports.f = __webpack_require__(25) ? Object.defineProperty : function(O, P, Attributes) { if (anObject(O), P = toPrimitive(P, !0), anObject(Attributes), IE8_DOM_DEFINE) try { return dP(O, P, Attributes); @@ -1617,7 +1620,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { var global = module.exports = "undefined" != typeof window && window.Math == Math ? window : "undefined" != typeof self && self.Math == Math ? self : Function("return this")(); "number" == typeof __g && (__g = global); }, function(module, exports, __webpack_require__) { - module.exports = !__webpack_require__(48)(function() { + module.exports = !__webpack_require__(53)(function() { return 7 != Object.defineProperty({}, "a", { get: function() { return 7; @@ -1626,7 +1629,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }); }, function(module, exports, __webpack_require__) { module.exports = { - default: __webpack_require__(355), + default: __webpack_require__(394), __esModule: !0 }; }, function(module, exports, __webpack_require__) { @@ -1637,7 +1640,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = !0; - var _defineProperty = __webpack_require__(142), _defineProperty2 = function(obj) { + var _defineProperty = __webpack_require__(154), _defineProperty2 = function(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; @@ -1658,7 +1661,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = !0; - var _typeof2 = __webpack_require__(101), _typeof3 = function(obj) { + var _typeof2 = __webpack_require__(105), _typeof3 = function(obj) { return obj && obj.__esModule ? obj : { default: obj }; @@ -1675,7 +1678,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; } exports.__esModule = !0; - var _setPrototypeOf = __webpack_require__(372), _setPrototypeOf2 = _interopRequireDefault(_setPrototypeOf), _create = __webpack_require__(376), _create2 = _interopRequireDefault(_create), _typeof2 = __webpack_require__(101), _typeof3 = _interopRequireDefault(_typeof2); + var _setPrototypeOf = __webpack_require__(411), _setPrototypeOf2 = _interopRequireDefault(_setPrototypeOf), _create = __webpack_require__(415), _create2 = _interopRequireDefault(_create), _typeof2 = __webpack_require__(105), _typeof3 = _interopRequireDefault(_typeof2); exports.default = function(subClass, superClass) { if ("function" != typeof superClass && null !== superClass) throw new TypeError("Super expression must either be null or a function, not " + (void 0 === superClass ? "undefined" : (0, _typeof3.default)(superClass))); @@ -1688,15 +1691,15 @@ var _bundleJs = []byte((((((((((`!function(modules) { } }), superClass && (_setPrototypeOf2.default ? (0, _setPrototypeOf2.default)(subClass, superClass) : subClass.__proto__ = superClass); }; +}, function(module, exports, __webpack_require__) { + var freeGlobal = __webpack_require__(268), freeSelf = "object" == typeof self && self && self.Object === Object && self, root = freeGlobal || freeSelf || Function("return this")(); + module.exports = root; }, function(module, exports) { function isObject(value) { var type = typeof value; return null != value && ("object" == type || "function" == type); } module.exports = isObject; -}, function(module, exports, __webpack_require__) { - var freeGlobal = __webpack_require__(243), freeSelf = "object" == typeof self && self && self.Object === Object && self, root = freeGlobal || freeSelf || Function("return this")(); - module.exports = root; }, function(module, exports, __webpack_require__) { "use strict"; function _interopRequireDefault(obj) { @@ -1707,53 +1710,48 @@ var _bundleJs = []byte((((((((((`!function(modules) { Object.defineProperty(exports, "__esModule", { value: !0 }), exports.translateStyle = exports.AnimateGroup = exports.configBezier = exports.configSpring = void 0; - var _Animate = __webpack_require__(264), _Animate2 = _interopRequireDefault(_Animate), _easing = __webpack_require__(277), _util = __webpack_require__(122), _AnimateGroup = __webpack_require__(681), _AnimateGroup2 = _interopRequireDefault(_AnimateGroup); + var _Animate = __webpack_require__(287), _Animate2 = _interopRequireDefault(_Animate), _easing = __webpack_require__(305), _util = __webpack_require__(132), _AnimateGroup = __webpack_require__(762), _AnimateGroup2 = _interopRequireDefault(_AnimateGroup); exports.configSpring = _easing.configSpring, exports.configBezier = _easing.configBezier, exports.AnimateGroup = _AnimateGroup2.default, exports.translateStyle = _util.translateStyle, exports.default = _Animate2.default; -}, function(module, exports, __webpack_require__) { - function isEqual(value, other) { - return baseIsEqual(value, other); - } - var baseIsEqual = __webpack_require__(177); - module.exports = isEqual; +}, function(module, exports) { + var isArray = 
Array.isArray; + module.exports = isArray; }, function(module, exports) { module.exports = function(it) { return "object" == typeof it ? null !== it : "function" == typeof it; }; -}, function(module, exports) { - function isObjectLike(value) { - return null != value && "object" == typeof value; - } - module.exports = isObjectLike; +}, function(module, exports, __webpack_require__) { + var freeGlobal = __webpack_require__(292), freeSelf = "object" == typeof self && self && self.Object === Object && self, root = freeGlobal || freeSelf || Function("return this")(); + module.exports = root; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; - var __WEBPACK_IMPORTED_MODULE_0__src_bisect__ = __webpack_require__(293); + var __WEBPACK_IMPORTED_MODULE_0__src_bisect__ = __webpack_require__(332); __webpack_require__.d(__webpack_exports__, "b", function() { return __WEBPACK_IMPORTED_MODULE_0__src_bisect__.a; }); - var __WEBPACK_IMPORTED_MODULE_1__src_ascending__ = __webpack_require__(64); + var __WEBPACK_IMPORTED_MODULE_1__src_ascending__ = __webpack_require__(69); __webpack_require__.d(__webpack_exports__, "a", function() { return __WEBPACK_IMPORTED_MODULE_1__src_ascending__.a; }); - var __WEBPACK_IMPORTED_MODULE_2__src_bisector__ = __webpack_require__(294); + var __WEBPACK_IMPORTED_MODULE_2__src_bisector__ = __webpack_require__(333); __webpack_require__.d(__webpack_exports__, "c", function() { return __WEBPACK_IMPORTED_MODULE_2__src_bisector__.a; }); - var __WEBPACK_IMPORTED_MODULE_18__src_quantile__ = (__webpack_require__(707), __webpack_require__(708), - __webpack_require__(296), __webpack_require__(298), __webpack_require__(709), __webpack_require__(712), - __webpack_require__(713), __webpack_require__(302), __webpack_require__(714), __webpack_require__(715), - __webpack_require__(716), __webpack_require__(717), __webpack_require__(303), __webpack_require__(295), - __webpack_require__(718), __webpack_require__(184)); + var __WEBPACK_IMPORTED_MODULE_18__src_quantile__ = (__webpack_require__(848), __webpack_require__(849), + __webpack_require__(335), __webpack_require__(337), __webpack_require__(850), __webpack_require__(853), + __webpack_require__(854), __webpack_require__(341), __webpack_require__(855), __webpack_require__(856), + __webpack_require__(857), __webpack_require__(858), __webpack_require__(342), __webpack_require__(334), + __webpack_require__(859), __webpack_require__(204)); __webpack_require__.d(__webpack_exports__, "d", function() { return __WEBPACK_IMPORTED_MODULE_18__src_quantile__.a; }); - var __WEBPACK_IMPORTED_MODULE_19__src_range__ = __webpack_require__(300); + var __WEBPACK_IMPORTED_MODULE_19__src_range__ = __webpack_require__(339); __webpack_require__.d(__webpack_exports__, "e", function() { return __WEBPACK_IMPORTED_MODULE_19__src_range__.a; }); - var __WEBPACK_IMPORTED_MODULE_23__src_ticks__ = (__webpack_require__(719), __webpack_require__(720), - __webpack_require__(721), __webpack_require__(301)); + var __WEBPACK_IMPORTED_MODULE_23__src_ticks__ = (__webpack_require__(860), __webpack_require__(861), + __webpack_require__(862), __webpack_require__(340)); __webpack_require__.d(__webpack_exports__, "h", function() { return __WEBPACK_IMPORTED_MODULE_23__src_ticks__.a; }), __webpack_require__.d(__webpack_exports__, "f", function() { @@ -1761,7 +1759,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }), __webpack_require__.d(__webpack_exports__, "g", function() { return __WEBPACK_IMPORTED_MODULE_23__src_ticks__.c; }); - 
__webpack_require__(304), __webpack_require__(297), __webpack_require__(722); + __webpack_require__(343), __webpack_require__(336), __webpack_require__(863); }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_require__.d(__webpack_exports__, "d", function() { @@ -1777,33 +1775,39 @@ var _bundleJs = []byte((((((((((`!function(modules) { }); var durationSecond = 1e3, durationMinute = 6e4, durationHour = 36e5, durationDay = 864e5, durationWeek = 6048e5; }, function(module, exports, __webpack_require__) { - "use strict"; - function makeEmptyFunction(arg) { - return function() { - return arg; - }; - } - var emptyFunction = function() {}; - emptyFunction.thatReturns = makeEmptyFunction, emptyFunction.thatReturnsFalse = makeEmptyFunction(!1), - emptyFunction.thatReturnsTrue = makeEmptyFunction(!0), emptyFunction.thatReturnsNull = makeEmptyFunction(null), - emptyFunction.thatReturnsThis = function() { - return this; - }, emptyFunction.thatReturnsArgument = function(arg) { - return arg; - }, module.exports = emptyFunction; -}, function(module, exports, __webpack_require__) { - var dP = __webpack_require__(22), createDesc = __webpack_require__(71); + var dP = __webpack_require__(22), createDesc = __webpack_require__(75); module.exports = __webpack_require__(25) ? function(object, key, value) { return dP.f(object, key, createDesc(1, value)); } : function(object, key, value) { return object[key] = value, object; }; +}, function(module, exports) { + var g; + g = function() { + return this; + }(); + try { + g = g || Function("return this")() || (0, eval)("this"); + } catch (e) { + "object" == typeof window && (g = window); + } + module.exports = g; }, function(module, exports, __webpack_require__) { function baseGetTag(value) { return null == value ? void 0 === value ? undefinedTag : nullTag : symToStringTag && symToStringTag in Object(value) ? getRawTag(value) : objectToString(value); } - var Symbol = __webpack_require__(78), getRawTag = __webpack_require__(522), objectToString = __webpack_require__(523), nullTag = "[object Null]", undefinedTag = "[object Undefined]", symToStringTag = Symbol ? Symbol.toStringTag : void 0; + var Symbol = __webpack_require__(83), getRawTag = __webpack_require__(603), objectToString = __webpack_require__(604), nullTag = "[object Null]", undefinedTag = "[object Undefined]", symToStringTag = Symbol ? 
Symbol.toStringTag : void 0; module.exports = baseGetTag; +}, function(module, exports) { + function isObjectLike(value) { + return null != value && "object" == typeof value; + } + module.exports = isObjectLike; +}, function(module, exports) { + function isObjectLike(value) { + return null != value && "object" == typeof value; + } + module.exports = isObjectLike; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function _toConsumableArray(arr) { @@ -1828,7 +1832,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { className: __WEBPACK_IMPORTED_MODULE_5_classnames___default()("recharts-label", className) }, attrs, positionAttrs), label); } - var __WEBPACK_IMPORTED_MODULE_0_lodash_isObject__ = __webpack_require__(31), __WEBPACK_IMPORTED_MODULE_0_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_3_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_3_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_react__), __WEBPACK_IMPORTED_MODULE_4_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_4_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_prop_types__), __WEBPACK_IMPORTED_MODULE_5_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_5_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_classnames__), __WEBPACK_IMPORTED_MODULE_6__Text__ = __webpack_require__(54), __WEBPACK_IMPORTED_MODULE_7__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_8__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_9__util_PolarUtils__ = __webpack_require__(23), _extends = Object.assign || function(target) { + var __WEBPACK_IMPORTED_MODULE_0_lodash_isObject__ = __webpack_require__(32), __WEBPACK_IMPORTED_MODULE_0_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_3_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_3_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_react__), __WEBPACK_IMPORTED_MODULE_4_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_4_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_prop_types__), __WEBPACK_IMPORTED_MODULE_5_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_5_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_classnames__), __WEBPACK_IMPORTED_MODULE_6__Text__ = __webpack_require__(61), __WEBPACK_IMPORTED_MODULE_7__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_8__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_9__util_PolarUtils__ = 
__webpack_require__(23), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -2038,9 +2042,15 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; Label.parseViewBox = parseViewBox, Label.renderCallByParent = renderCallByParent, __webpack_exports__.a = Label; +}, function(module, exports, __webpack_require__) { + function isEqual(value, other) { + return baseIsEqual(value, other); + } + var baseIsEqual = __webpack_require__(199); + module.exports = isEqual; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; - var __WEBPACK_IMPORTED_MODULE_0__src_color__ = __webpack_require__(187); + var __WEBPACK_IMPORTED_MODULE_0__src_color__ = __webpack_require__(207); __webpack_require__.d(__webpack_exports__, "a", function() { return __WEBPACK_IMPORTED_MODULE_0__src_color__.e; }), __webpack_require__.d(__webpack_exports__, "f", function() { @@ -2048,13 +2058,13 @@ var _bundleJs = []byte((((((((((`!function(modules) { }), __webpack_require__.d(__webpack_exports__, "d", function() { return __WEBPACK_IMPORTED_MODULE_0__src_color__.f; }); - var __WEBPACK_IMPORTED_MODULE_1__src_lab__ = __webpack_require__(730); + var __WEBPACK_IMPORTED_MODULE_1__src_lab__ = __webpack_require__(871); __webpack_require__.d(__webpack_exports__, "e", function() { return __WEBPACK_IMPORTED_MODULE_1__src_lab__.a; }), __webpack_require__.d(__webpack_exports__, "c", function() { return __WEBPACK_IMPORTED_MODULE_1__src_lab__.b; }); - var __WEBPACK_IMPORTED_MODULE_2__src_cubehelix__ = __webpack_require__(731); + var __WEBPACK_IMPORTED_MODULE_2__src_cubehelix__ = __webpack_require__(872); __webpack_require__.d(__webpack_exports__, "b", function() { return __WEBPACK_IMPORTED_MODULE_2__src_cubehelix__.a; }); @@ -2090,7 +2100,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { })); })) : null; } - var __WEBPACK_IMPORTED_MODULE_0_lodash_isObject__ = __webpack_require__(31), __WEBPACK_IMPORTED_MODULE_0_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_3_lodash_last__ = __webpack_require__(781), __WEBPACK_IMPORTED_MODULE_3_lodash_last___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_last__), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray__ = __webpack_require__(11), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7__Label__ = __webpack_require__(42), __WEBPACK_IMPORTED_MODULE_8__container_Layer__ = __webpack_require__(14), __WEBPACK_IMPORTED_MODULE_9__util_ReactUtils__ = __webpack_require__(4), 
__WEBPACK_IMPORTED_MODULE_10__util_ChartUtils__ = __webpack_require__(16), _extends = Object.assign || function(target) { + var __WEBPACK_IMPORTED_MODULE_0_lodash_isObject__ = __webpack_require__(32), __WEBPACK_IMPORTED_MODULE_0_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_3_lodash_last__ = __webpack_require__(922), __WEBPACK_IMPORTED_MODULE_3_lodash_last___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_last__), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray__ = __webpack_require__(13), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7__Label__ = __webpack_require__(44), __WEBPACK_IMPORTED_MODULE_8__container_Layer__ = __webpack_require__(14), __WEBPACK_IMPORTED_MODULE_9__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_10__util_ChartUtils__ = __webpack_require__(16), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -2174,7 +2184,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { } }), superClass && (Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass); } - var __WEBPACK_IMPORTED_MODULE_0_lodash_sortBy__ = __webpack_require__(284), __WEBPACK_IMPORTED_MODULE_0_lodash_sortBy___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_sortBy__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_range__ = __webpack_require__(334), __WEBPACK_IMPORTED_MODULE_2_lodash_range___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_range__), __WEBPACK_IMPORTED_MODULE_3_lodash_throttle__ = __webpack_require__(790), __WEBPACK_IMPORTED_MODULE_3_lodash_throttle___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_throttle__), __WEBPACK_IMPORTED_MODULE_4_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_4_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_7_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_7_classnames__), __WEBPACK_IMPORTED_MODULE_8__container_Surface__ = __webpack_require__(79), __WEBPACK_IMPORTED_MODULE_9__container_Layer__ = __webpack_require__(14), __WEBPACK_IMPORTED_MODULE_10__component_Tooltip__ = __webpack_require__(121), __WEBPACK_IMPORTED_MODULE_11__component_Legend__ = __webpack_require__(170), __WEBPACK_IMPORTED_MODULE_12__shape_Curve__ = __webpack_require__(66), __WEBPACK_IMPORTED_MODULE_13__shape_Cross__ = __webpack_require__(328), __WEBPACK_IMPORTED_MODULE_14__shape_Sector__ = __webpack_require__(127), __WEBPACK_IMPORTED_MODULE_15__shape_Dot__ = __webpack_require__(56), __WEBPACK_IMPORTED_MODULE_16__shape_Rectangle__ = __webpack_require__(65), __WEBPACK_IMPORTED_MODULE_17__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_18__cartesian_CartesianAxis__ = __webpack_require__(335), __WEBPACK_IMPORTED_MODULE_19__cartesian_Brush__ = __webpack_require__(333), __WEBPACK_IMPORTED_MODULE_20__util_DOMUtils__ = __webpack_require__(183), __WEBPACK_IMPORTED_MODULE_21__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_22__util_ChartUtils__ = __webpack_require__(16), __WEBPACK_IMPORTED_MODULE_23__util_PolarUtils__ = __webpack_require__(23), __WEBPACK_IMPORTED_MODULE_24__util_PureRender__ = __webpack_require__(5), __WEBPACK_IMPORTED_MODULE_25__util_Events__ = __webpack_require__(791), _extends = Object.assign || function(target) { + var __WEBPACK_IMPORTED_MODULE_0_lodash_sortBy__ = __webpack_require__(321), __WEBPACK_IMPORTED_MODULE_0_lodash_sortBy___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_sortBy__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_range__ = __webpack_require__(373), __WEBPACK_IMPORTED_MODULE_2_lodash_range___default = 
__webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_range__), __WEBPACK_IMPORTED_MODULE_3_lodash_throttle__ = __webpack_require__(933), __WEBPACK_IMPORTED_MODULE_3_lodash_throttle___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_throttle__), __WEBPACK_IMPORTED_MODULE_4_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_4_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_7_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_7_classnames__), __WEBPACK_IMPORTED_MODULE_8__container_Surface__ = __webpack_require__(82), __WEBPACK_IMPORTED_MODULE_9__container_Layer__ = __webpack_require__(14), __WEBPACK_IMPORTED_MODULE_10__component_Tooltip__ = __webpack_require__(125), __WEBPACK_IMPORTED_MODULE_11__component_Legend__ = __webpack_require__(180), __WEBPACK_IMPORTED_MODULE_12__shape_Curve__ = __webpack_require__(71), __WEBPACK_IMPORTED_MODULE_13__shape_Cross__ = __webpack_require__(367), __WEBPACK_IMPORTED_MODULE_14__shape_Sector__ = __webpack_require__(139), __WEBPACK_IMPORTED_MODULE_15__shape_Dot__ = __webpack_require__(63), __WEBPACK_IMPORTED_MODULE_16__shape_Rectangle__ = __webpack_require__(70), __WEBPACK_IMPORTED_MODULE_17__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_18__cartesian_CartesianAxis__ = __webpack_require__(374), __WEBPACK_IMPORTED_MODULE_19__cartesian_Brush__ = __webpack_require__(372), __WEBPACK_IMPORTED_MODULE_20__util_DOMUtils__ = __webpack_require__(198), __WEBPACK_IMPORTED_MODULE_21__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_22__util_ChartUtils__ = __webpack_require__(16), __WEBPACK_IMPORTED_MODULE_23__util_PolarUtils__ = __webpack_require__(23), __WEBPACK_IMPORTED_MODULE_24__util_PureRender__ = __webpack_require__(5), __WEBPACK_IMPORTED_MODULE_25__util_Events__ = __webpack_require__(934), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -3194,7 +3204,42 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; __webpack_exports__.a = generateCategoricalChart; }, function(module, exports, __webpack_require__) { - var aFunction = __webpack_require__(205); + "use strict"; + (function(process) { + function invariant(condition, format, a, b, c, d, e, f) { + if (validateFormat(format), !condition) { + var error; + if (void 0 === format) error = new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings."); else { + var args = [ a, b, c, d, e, f ], argIndex = 0; + error = new Error(format.replace(/%s/g, function() { + return args[argIndex++]; + })), error.name = "Invariant Violation"; + } + throw error.framesToPop = 1, error; + } + } + var validateFormat = function(format) {}; + "production" !== process.env.NODE_ENV && (validateFormat = function(format) { + if (void 0 === format) throw new Error("invariant requires an error message 
argument"); + }), module.exports = invariant; + }).call(exports, __webpack_require__(2)); +}, function(module, exports, __webpack_require__) { + "use strict"; + function makeEmptyFunction(arg) { + return function() { + return arg; + }; + } + var emptyFunction = function() {}; + emptyFunction.thatReturns = makeEmptyFunction, emptyFunction.thatReturnsFalse = makeEmptyFunction(!1), + emptyFunction.thatReturnsTrue = makeEmptyFunction(!0), emptyFunction.thatReturnsNull = makeEmptyFunction(null), + emptyFunction.thatReturnsThis = function() { + return this; + }, emptyFunction.thatReturnsArgument = function(arg) { + return arg; + }, module.exports = emptyFunction; +}, function(module, exports, __webpack_require__) { + var aFunction = __webpack_require__(223); module.exports = function(fn, that, length) { if (aFunction(fn), void 0 === that) return fn; switch (length) { @@ -3238,7 +3283,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; }, function(module, exports, __webpack_require__) { module.exports = { - default: __webpack_require__(382), + default: __webpack_require__(421), __esModule: !0 }; }, function(module, exports, __webpack_require__) { @@ -3285,17 +3330,17 @@ var _bundleJs = []byte((((((((((`!function(modules) { Object.defineProperty(exports, "__esModule", { value: !0 }); - var _typeof2 = __webpack_require__(101), _typeof3 = _interopRequireDefault(_typeof2), _keys = __webpack_require__(50), _keys2 = _interopRequireDefault(_keys); + var _typeof2 = __webpack_require__(105), _typeof3 = _interopRequireDefault(_typeof2), _keys = __webpack_require__(55), _keys2 = _interopRequireDefault(_keys); exports.capitalize = capitalize, exports.contains = contains, exports.findIndex = findIndex, exports.find = find, exports.createChainedFunction = createChainedFunction; - var _warning = __webpack_require__(12), _warning2 = _interopRequireDefault(_warning); + var _warning = __webpack_require__(11), _warning2 = _interopRequireDefault(_warning); }).call(exports, __webpack_require__(2)); }, function(module, exports, __webpack_require__) { function getNative(object, key) { var value = getValue(object, key); return baseIsNative(value) ? value : void 0; } - var baseIsNative = __webpack_require__(564), getValue = __webpack_require__(567); + var baseIsNative = __webpack_require__(611), getValue = __webpack_require__(614); module.exports = getNative; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; @@ -3304,6 +3349,19 @@ var _bundleJs = []byte((((((((((`!function(modules) { return x; }; }; +}, function(module, exports, __webpack_require__) { + function getNative(object, key) { + var value = getValue(object, key); + return baseIsNative(value) ? value : void 0; + } + var baseIsNative = __webpack_require__(668), getValue = __webpack_require__(673); + module.exports = getNative; +}, function(module, exports, __webpack_require__) { + function baseGetTag(value) { + return null == value ? void 0 === value ? undefinedTag : nullTag : symToStringTag && symToStringTag in Object(value) ? getRawTag(value) : objectToString(value); + } + var Symbol = __webpack_require__(128), getRawTag = __webpack_require__(669), objectToString = __webpack_require__(670), nullTag = "[object Null]", undefinedTag = "[object Undefined]", symToStringTag = Symbol ? 
Symbol.toStringTag : void 0; + module.exports = baseGetTag; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function _objectWithoutProperties(obj, keys) { @@ -3329,7 +3387,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { } }), superClass && (Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass); } - var _class, _temp2, __WEBPACK_IMPORTED_MODULE_0_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_0_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_1_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_1_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_react__), __WEBPACK_IMPORTED_MODULE_2_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_2_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_prop_types__), __WEBPACK_IMPORTED_MODULE_3_reduce_css_calc__ = __webpack_require__(688), __WEBPACK_IMPORTED_MODULE_3_reduce_css_calc___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_reduce_css_calc__), __WEBPACK_IMPORTED_MODULE_4_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_4_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_classnames__), __WEBPACK_IMPORTED_MODULE_5__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_6__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_7__util_DOMUtils__ = __webpack_require__(183), _extends = Object.assign || function(target) { + var _class, _temp2, __WEBPACK_IMPORTED_MODULE_0_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_0_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_1_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_1_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_react__), __WEBPACK_IMPORTED_MODULE_2_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_2_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_prop_types__), __WEBPACK_IMPORTED_MODULE_3_reduce_css_calc__ = __webpack_require__(771), __WEBPACK_IMPORTED_MODULE_3_reduce_css_calc___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_reduce_css_calc__), __WEBPACK_IMPORTED_MODULE_4_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_4_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_classnames__), __WEBPACK_IMPORTED_MODULE_5__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_6__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_7__util_DOMUtils__ = __webpack_require__(198), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -3549,12 +3607,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, _class = _temp)) || _class; __webpack_exports__.a = Dot; }, function(module, exports, __webpack_require__) { - var IObject = __webpack_require__(134), defined = __webpack_require__(136); + var IObject = __webpack_require__(146), defined = __webpack_require__(148); module.exports = function(it) { return IObject(defined(it)); }; }, function(module, exports, __webpack_require__) { - var defined = __webpack_require__(136); + var defined = __webpack_require__(148); module.exports = 
function(it) { return Object(defined(it)); }; @@ -3593,7 +3651,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { return protoProps && defineProperties(Constructor.prototype, protoProps), staticProps && defineProperties(Constructor, staticProps), Constructor; }; - }(), _warning = __webpack_require__(12), _warning2 = _interopRequireDefault(_warning), _toCss = __webpack_require__(152), _toCss2 = _interopRequireDefault(_toCss), _toCssValue = __webpack_require__(106), _toCssValue2 = _interopRequireDefault(_toCssValue), StyleRule = function() { + }(), _warning = __webpack_require__(11), _warning2 = _interopRequireDefault(_warning), _toCss = __webpack_require__(163), _toCss2 = _interopRequireDefault(_toCss), _toCssValue = __webpack_require__(110), _toCssValue2 = _interopRequireDefault(_toCssValue), StyleRule = function() { function StyleRule(key, style, options) { _classCallCheck(this, StyleRule), this.type = "style", this.isProcessed = !1; var sheet = options.sheet, Renderer = options.Renderer, selector = options.selector; @@ -3657,34 +3715,17 @@ var _bundleJs = []byte((((((((((`!function(modules) { } ]), StyleRule; }(); exports.default = StyleRule; -}, function(module, exports) { - var g; - g = function() { - return this; - }(); - try { - g = g || Function("return this")() || (0, eval)("this"); - } catch (e) { - "object" == typeof window && (g = window); - } - module.exports = g; }, function(module, exports, __webpack_require__) { function isSymbol(value) { return "symbol" == typeof value || isObjectLike(value) && baseGetTag(value) == symbolTag; } - var baseGetTag = __webpack_require__(41), isObjectLike = __webpack_require__(36), symbolTag = "[object Symbol]"; + var baseGetTag = __webpack_require__(41), isObjectLike = __webpack_require__(42), symbolTag = "[object Symbol]"; module.exports = isSymbol; }, function(module, exports) { function identity(value) { return value; } module.exports = identity; -}, function(module, exports, __webpack_require__) { - function baseIteratee(value) { - return "function" == typeof value ? value : null == value ? identity : "object" == typeof value ? isArray(value) ? baseMatchesProperty(value[0], value[1]) : baseMatches(value) : property(value); - } - var baseMatches = __webpack_require__(671), baseMatchesProperty = __webpack_require__(674), identity = __webpack_require__(62), isArray = __webpack_require__(11), property = __webpack_require__(678); - module.exports = baseIteratee; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_exports__.a = function(a, b) { @@ -3855,7 +3896,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { } }), superClass && (Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass); } - var _class, _class2, _temp, __WEBPACK_IMPORTED_MODULE_0_lodash_isArray__ = __webpack_require__(11), __WEBPACK_IMPORTED_MODULE_0_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_2_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_react__), __WEBPACK_IMPORTED_MODULE_3_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_3_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_prop_types__), __WEBPACK_IMPORTED_MODULE_4_d3_shape__ = __webpack_require__(172), __WEBPACK_IMPORTED_MODULE_5_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_5_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_classnames__), __WEBPACK_IMPORTED_MODULE_6__util_PureRender__ = __webpack_require__(5), __WEBPACK_IMPORTED_MODULE_7__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_8__util_DataUtils__ = __webpack_require__(9), _extends = Object.assign || function(target) { + var _class, _class2, _temp, __WEBPACK_IMPORTED_MODULE_0_lodash_isArray__ = __webpack_require__(13), __WEBPACK_IMPORTED_MODULE_0_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_2_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_react__), __WEBPACK_IMPORTED_MODULE_3_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_3_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_prop_types__), __WEBPACK_IMPORTED_MODULE_4_d3_shape__ = __webpack_require__(182), __WEBPACK_IMPORTED_MODULE_5_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_5_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_classnames__), __WEBPACK_IMPORTED_MODULE_6__util_PureRender__ = __webpack_require__(5), __WEBPACK_IMPORTED_MODULE_7__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_8__util_DataUtils__ = __webpack_require__(9), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -4173,26 +4214,6 @@ var _bundleJs = []byte((((((((((`!function(modules) { } return to; }; -}, function(module, exports, __webpack_require__) { - "use strict"; - (function(process) { - function invariant(condition, format, a, b, c, d, e, f) { - if (validateFormat(format), !condition) { - var error; - if (void 0 === format) error = new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings."); else { - var args = [ a, b, c, d, e, f ], argIndex = 0; - error = new Error(format.replace(/%s/g, function() { - return args[argIndex++]; - })), error.name = "Invariant Violation"; - } - throw error.framesToPop = 1, error; - } 
- } - var validateFormat = function(format) {}; - "production" !== process.env.NODE_ENV && (validateFormat = function(format) { - if (void 0 === format) throw new Error("invariant requires an error message argument"); - }), module.exports = invariant; - }).call(exports, __webpack_require__(2)); }, function(module, exports) { module.exports = function(bitmap, value) { return { @@ -4203,7 +4224,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; }; }, function(module, exports, __webpack_require__) { - var $keys = __webpack_require__(208), enumBugKeys = __webpack_require__(140); + var $keys = __webpack_require__(226), enumBugKeys = __webpack_require__(152); module.exports = Object.keys || function(O) { return $keys(O, enumBugKeys); }; @@ -4261,7 +4282,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = !0; - var _getDisplayName = __webpack_require__(226), _getDisplayName2 = function(obj) { + var _getDisplayName = __webpack_require__(244), _getDisplayName2 = function(obj) { return obj && obj.__esModule ? obj : { default: obj }; @@ -4300,7 +4321,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { return protoProps && defineProperties(Constructor.prototype, protoProps), staticProps && defineProperties(Constructor, staticProps), Constructor; }; - }(), _createRule = __webpack_require__(107), _createRule2 = _interopRequireDefault(_createRule), _linkRule = __webpack_require__(231), _linkRule2 = _interopRequireDefault(_linkRule), _StyleRule = __webpack_require__(59), _StyleRule2 = _interopRequireDefault(_StyleRule), _escape = __webpack_require__(428), _escape2 = _interopRequireDefault(_escape), RuleList = function() { + }(), _createRule = __webpack_require__(111), _createRule2 = _interopRequireDefault(_createRule), _linkRule = __webpack_require__(249), _linkRule2 = _interopRequireDefault(_linkRule), _StyleRule = __webpack_require__(66), _StyleRule2 = _interopRequireDefault(_StyleRule), _escape = __webpack_require__(467), _escape2 = _interopRequireDefault(_escape), RuleList = function() { function RuleList(options) { _classCallCheck(this, RuleList), this.map = {}, this.raw = {}, this.index = [], this.options = options, this.classes = options.classes; @@ -4444,9 +4465,6 @@ var _bundleJs = []byte((((((((((`!function(modules) { color: "rgba(255, 255, 255, 0.54)" } }; -}, function(module, exports, __webpack_require__) { - var root = __webpack_require__(32), Symbol = root.Symbol; - module.exports = Symbol; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function _objectWithoutProperties(obj, keys) { @@ -4490,15 +4508,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { children: __WEBPACK_IMPORTED_MODULE_1_prop_types___default.a.oneOfType([ __WEBPACK_IMPORTED_MODULE_1_prop_types___default.a.arrayOf(__WEBPACK_IMPORTED_MODULE_1_prop_types___default.a.node), __WEBPACK_IMPORTED_MODULE_1_prop_types___default.a.node ]) }; Surface.propTypes = propTypes, __webpack_exports__.a = Surface; -}, function(module, exports) { - function arrayMap(array, iteratee) { - for (var index = -1, length = null == array ? 
0 : array.length, result = Array(length); ++index < length; ) result[index] = iteratee(array[index], index, array); - return result; - } - module.exports = arrayMap; +}, function(module, exports, __webpack_require__) { + var root = __webpack_require__(31), Symbol = root.Symbol; + module.exports = Symbol; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; - var __WEBPACK_IMPORTED_MODULE_0__src_path__ = __webpack_require__(586); + var __WEBPACK_IMPORTED_MODULE_0__src_path__ = __webpack_require__(633); __webpack_require__.d(__webpack_exports__, "a", function() { return __WEBPACK_IMPORTED_MODULE_0__src_path__.a; }); @@ -4546,12 +4561,6 @@ var _bundleJs = []byte((((((((((`!function(modules) { for (var n = series.length, o = new Array(n); --n >= 0; ) o[n] = n; return o; }; -}, function(module, exports, __webpack_require__) { - function isArrayLike(value) { - return null != value && isLength(value.length) && !isFunction(value); - } - var isFunction = __webpack_require__(8), isLength = __webpack_require__(181); - module.exports = isArrayLike; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function Cell() { @@ -4567,6 +4576,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; Cell.propTypes = _extends({}, __WEBPACK_IMPORTED_MODULE_1__util_ReactUtils__.c), Cell.displayName = "Cell", __webpack_exports__.a = Cell; +}, function(module, exports, __webpack_require__) { + function baseIteratee(value) { + return "function" == typeof value ? value : null == value ? identity : "object" == typeof value ? isArray(value) ? baseMatchesProperty(value[0], value[1]) : baseMatches(value) : property(value); + } + var baseMatches = __webpack_require__(815), baseMatchesProperty = __webpack_require__(818), identity = __webpack_require__(68), isArray = __webpack_require__(13), property = __webpack_require__(822); + module.exports = baseIteratee; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_exports__.a = function(x) { @@ -4601,29 +4616,29 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, linearish(scale); } __webpack_exports__.b = linearish, __webpack_exports__.a = linear; - var __WEBPACK_IMPORTED_MODULE_0_d3_array__ = __webpack_require__(37), __WEBPACK_IMPORTED_MODULE_1_d3_interpolate__ = __webpack_require__(89), __WEBPACK_IMPORTED_MODULE_2__continuous__ = __webpack_require__(125), __WEBPACK_IMPORTED_MODULE_3__tickFormat__ = __webpack_require__(742); + var __WEBPACK_IMPORTED_MODULE_0_d3_array__ = __webpack_require__(37), __WEBPACK_IMPORTED_MODULE_1_d3_interpolate__ = __webpack_require__(92), __WEBPACK_IMPORTED_MODULE_2__continuous__ = __webpack_require__(137), __WEBPACK_IMPORTED_MODULE_3__tickFormat__ = __webpack_require__(883); }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; - var __WEBPACK_IMPORTED_MODULE_0__src_value__ = __webpack_require__(186); + var __WEBPACK_IMPORTED_MODULE_0__src_value__ = __webpack_require__(206); __webpack_require__.d(__webpack_exports__, "a", function() { return __WEBPACK_IMPORTED_MODULE_0__src_value__.a; }); - var __WEBPACK_IMPORTED_MODULE_5__src_number__ = (__webpack_require__(310), __webpack_require__(189), - __webpack_require__(308), __webpack_require__(311), __webpack_require__(124)); + var __WEBPACK_IMPORTED_MODULE_5__src_number__ = (__webpack_require__(349), __webpack_require__(209), + __webpack_require__(347), __webpack_require__(350), __webpack_require__(136)); __webpack_require__.d(__webpack_exports__, "c", function() { return 
__WEBPACK_IMPORTED_MODULE_5__src_number__.a; }); - var __WEBPACK_IMPORTED_MODULE_7__src_round__ = (__webpack_require__(312), __webpack_require__(732)); + var __WEBPACK_IMPORTED_MODULE_7__src_round__ = (__webpack_require__(351), __webpack_require__(873)); __webpack_require__.d(__webpack_exports__, "d", function() { return __WEBPACK_IMPORTED_MODULE_7__src_round__.a; }); - var __WEBPACK_IMPORTED_MODULE_15__src_cubehelix__ = (__webpack_require__(313), __webpack_require__(733), - __webpack_require__(736), __webpack_require__(307), __webpack_require__(737), __webpack_require__(738), - __webpack_require__(739), __webpack_require__(740)); + var __WEBPACK_IMPORTED_MODULE_15__src_cubehelix__ = (__webpack_require__(352), __webpack_require__(874), + __webpack_require__(877), __webpack_require__(346), __webpack_require__(878), __webpack_require__(879), + __webpack_require__(880), __webpack_require__(881)); __webpack_require__.d(__webpack_exports__, "b", function() { return __WEBPACK_IMPORTED_MODULE_15__src_cubehelix__.a; }); - __webpack_require__(741); + __webpack_require__(882); }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function linear(a, d) { @@ -4650,7 +4665,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { return d ? linear(a, d) : Object(__WEBPACK_IMPORTED_MODULE_0__constant__.a)(isNaN(a) ? b : a); } __webpack_exports__.c = hue, __webpack_exports__.b = gamma, __webpack_exports__.a = nogamma; - var __WEBPACK_IMPORTED_MODULE_0__constant__ = __webpack_require__(309); + var __WEBPACK_IMPORTED_MODULE_0__constant__ = __webpack_require__(348); }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_exports__.a = function(s) { @@ -4845,7 +4860,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; (function(process) { - var emptyFunction = __webpack_require__(39), warning = emptyFunction; + var emptyFunction = __webpack_require__(50), warning = emptyFunction; if ("production" !== process.env.NODE_ENV) { var printWarning = function(format) { for (var _len = arguments.length, args = Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) args[_key - 1] = arguments[_key]; @@ -4858,7 +4873,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { } catch (x) {} }; warning = function(condition, format) { - if (void 0 === format) throw new Error("` + "`")) + (`warning(condition, format, ...args)` + ("`" + ` requires a warning message argument"); + if (void 0 === format) throw new Error("` + ("`" + `warning(condition, format, ...args)`)) + ("`" + (` requires a warning message argument"); if (0 !== format.indexOf("Failed Composite propType: ") && !condition) { for (var _len2 = arguments.length, args = Array(_len2 > 2 ? _len2 - 2 : 0), _key2 = 2; _key2 < _len2; _key2++) args[_key2 - 2] = arguments[_key2]; printWarning.apply(void 0, [ format ].concat(args)); @@ -4880,7 +4895,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { } } } - "production" === process.env.NODE_ENV ? (checkDCE(), module.exports = __webpack_require__(339)) : module.exports = __webpack_require__(342); + "production" === process.env.NODE_ENV ? 
(checkDCE(), module.exports = __webpack_require__(378)) : module.exports = __webpack_require__(381); }).call(exports, __webpack_require__(2)); }, function(module, exports, __webpack_require__) { "use strict"; @@ -4898,10 +4913,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { var hasOwnProperty = Object.prototype.hasOwnProperty; module.exports = shallowEqual; }, function(module, exports, __webpack_require__) { - var toInteger = __webpack_require__(137), min = Math.min; + var toInteger = __webpack_require__(149), min = Math.min; module.exports = function(it) { return it > 0 ? min(toInteger(it), 9007199254740991) : 0; }; +}, function(module, exports) { + module.exports = !0; }, function(module, exports) { var id = 0, px = Math.random(); module.exports = function(key) { @@ -4917,7 +4934,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; } exports.__esModule = !0; - var _iterator = __webpack_require__(357), _iterator2 = _interopRequireDefault(_iterator), _symbol = __webpack_require__(365), _symbol2 = _interopRequireDefault(_symbol), _typeof = "function" == typeof _symbol2.default && "symbol" == typeof _iterator2.default ? function(obj) { + var _iterator = __webpack_require__(396), _iterator2 = _interopRequireDefault(_iterator), _symbol = __webpack_require__(404), _symbol2 = _interopRequireDefault(_symbol), _typeof = "function" == typeof _symbol2.default && "symbol" == typeof _iterator2.default ? function(obj) { return typeof obj; } : function(obj) { return obj && "function" == typeof _symbol2.default && obj.constructor === _symbol2.default && obj !== _symbol2.default.prototype ? "symbol" : typeof obj; @@ -4928,9 +4945,9 @@ var _bundleJs = []byte((((((((((`!function(modules) { return obj && "function" == typeof _symbol2.default && obj.constructor === _symbol2.default && obj !== _symbol2.default.prototype ? "symbol" : void 0 === obj ? "undefined" : _typeof(obj); }; }, function(module, exports, __webpack_require__) { - var anObject = __webpack_require__(47), dPs = __webpack_require__(361), enumBugKeys = __webpack_require__(140), IE_PROTO = __webpack_require__(138)("IE_PROTO"), Empty = function() {}, createDict = function() { - var iframeDocument, iframe = __webpack_require__(207)("iframe"), i = enumBugKeys.length; - for (iframe.style.display = "none", __webpack_require__(362).appendChild(iframe), + var anObject = __webpack_require__(52), dPs = __webpack_require__(400), enumBugKeys = __webpack_require__(152), IE_PROTO = __webpack_require__(150)("IE_PROTO"), Empty = function() {}, createDict = function() { + var iframeDocument, iframe = __webpack_require__(225)("iframe"), i = enumBugKeys.length; + for (iframe.style.display = "none", __webpack_require__(401).appendChild(iframe), iframe.src = "javascript:", iframeDocument = iframe.contentWindow.document, iframeDocument.open(), iframeDocument.write("`, "/"+uriCopy.String()) - }, - }} -} - -//ValidateCaseErrors is a method that process the request object through certain validators -//that assert if certain conditions are met for further information to log as an error -func ValidateCaseErrors(r *Request) string { - for _, err := range caseErrors { - if err.Validator(r) { - return err.Msg(r) - } - } - - return "" -} - -//ShowMultipeChoices is used when a user requests a resource in a manifest which results -//in ambiguous results. It returns a HTML page with clickable links of each of the entry -//in the manifest which fits the request URI ambiguity. 
-//For example, if the user requests bzz://read and that manifest contains entries -//"readme.md" and "readinglist.txt", a HTML page is returned with this two links. -//This only applies if the manifest has no default entry -func ShowMultipleChoices(w http.ResponseWriter, req *Request, list api.ManifestList) { - msg := "" - if list.Entries == nil { - Respond(w, req, "Could not resolve", http.StatusInternalServerError) - return - } - //make links relative - //requestURI comes with the prefix of the ambiguous path, e.g. "read" for "readme.md" and "readinglist.txt" - //to get clickable links, need to remove the ambiguous path, i.e. "read" - idx := strings.LastIndex(req.RequestURI, "/") - if idx == -1 { - Respond(w, req, "Internal Server Error", http.StatusInternalServerError) - return - } - //remove ambiguous part - base := req.RequestURI[:idx+1] - for _, e := range list.Entries { - //create clickable link for each entry - msg += "" + e.Path + "
" - } - Respond(w, req, msg, http.StatusMultipleChoices) -} - -//Respond is used to show an HTML page to a client. -//If there is an `Accept` header of `application/json`, JSON will be returned instead -//The function just takes a string message which will be displayed in the error page. -//The code is used to evaluate which template will be displayed -//(and return the correct HTTP status code) -func Respond(w http.ResponseWriter, req *Request, msg string, code int) { - additionalMessage := ValidateCaseErrors(req) - switch code { - case http.StatusInternalServerError: - log.Output(msg, log.LvlError, l.CallDepth, "ruid", req.ruid, "code", code) - default: - log.Output(msg, log.LvlDebug, l.CallDepth, "ruid", req.ruid, "code", code) - } - - if code >= 400 { - w.Header().Del("Cache-Control") //avoid sending cache headers for errors! - w.Header().Del("ETag") - } - - respond(w, &req.Request, &ResponseParams{ - Code: code, - Msg: msg, - Details: template.HTML(additionalMessage), - Timestamp: time.Now().Format(time.RFC1123), - template: getTemplate(code), - }) -} - -//evaluate if client accepts html or json response -func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) { - w.WriteHeader(params.Code) - if r.Header.Get("Accept") == "application/json" { - respondJSON(w, params) - } else { - respondHTML(w, params) - } -} - -//return a HTML page -func respondHTML(w http.ResponseWriter, params *ResponseParams) { - htmlCounter.Inc(1) - err := params.template.Execute(w, params) - if err != nil { - log.Error(err.Error()) - } -} - -//return JSON -func respondJSON(w http.ResponseWriter, params *ResponseParams) { - jsonCounter.Inc(1) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(params) -} - -//get the HTML template for a given code -func getTemplate(code int) *template.Template { - if val, tmpl := templateMap[code]; tmpl { - return val - } - return templateMap[0] -} diff --git a/swarm/api/http/error_templates.go b/swarm/api/http/error_templates.go deleted file mode 100644 index f3c643c90d39..000000000000 --- a/swarm/api/http/error_templates.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -/* -We use html templates to handle simple but as informative as possible error pages. - -To eliminate circular dependency in case of an error, we don't store error pages on swarm. -We can't save the error pages as html files on disk, or when deploying compiled binaries -they won't be found. - -For this reason we resort to save the HTML error pages as strings, which then can be -parsed by Go's html/template package -*/ -package http - -//This returns the HTML for generic errors -func GetGenericErrorPage() string { - page := ` - - - - - - - - - - - Swarm::HTTP Error Page - - - - -
-	[page body markup: "There was a problem serving the requested page", {{.Timestamp}}, "Hmmmmm....Swarm was not able to serve your request!", "Error message:" {{.Msg}}, {{.Details}}, "Error code:" {{.Code}}, footer "Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution"]
- - - -` - return page -} - -//This returns the HTML for a 404 Not Found error -func GetNotFoundErrorPage() string { - page := ` - - - - - - - - - - - Swarm::404 HTTP Not Found - - - - -
-	[page body markup: "Resource Not Found", {{.Timestamp}}, "Unfortunately, the resource you were trying to access could not be found on swarm.", {{.Msg}}, {{.Details}}, "Error code:" {{.Code}}, footer "Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution"]
- - - -` - return page -} - -//This returns the HTML for a page listing disambiguation options -//i.e. if user requested bzz://read and the manifest contains "readme.md" and "readinglist.txt", -//this page is returned with a clickable list the existing disambiguation links in the manifest -func GetMultipleChoicesErrorPage() string { - page := ` - - - - - - - - - - - Swarm::HTTP Disambiguation Page - - - - -
-	[page body markup: "Swarm: disambiguation", {{.Timestamp}}, "Your request yields ambiguous results!", "Your request may refer to:", {{ .Details}}, "Error code:" {{.Code}}, footer "Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution"]
- - - -` - return page -} diff --git a/swarm/api/http/middleware.go b/swarm/api/http/middleware.go new file mode 100644 index 000000000000..3b2dcc7d5986 --- /dev/null +++ b/swarm/api/http/middleware.go @@ -0,0 +1,105 @@ +package http + +import ( + "fmt" + "net/http" + "runtime/debug" + "strings" + + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/swarm/api" + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/sctx" + "github.com/ethereum/go-ethereum/swarm/spancontext" + "github.com/pborman/uuid" +) + +// Adapt chains h (main request handler) main handler to adapters (middleware handlers) +// Please note that the order of execution for `adapters` is FIFO (adapters[0] will be executed first) +func Adapt(h http.Handler, adapters ...Adapter) http.Handler { + for i := range adapters { + adapter := adapters[len(adapters)-1-i] + h = adapter(h) + } + return h +} + +type Adapter func(http.Handler) http.Handler + +func SetRequestID(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r = r.WithContext(SetRUID(r.Context(), uuid.New()[:8])) + metrics.GetOrRegisterCounter(fmt.Sprintf("http.request.%s", r.Method), nil).Inc(1) + log.Info("created ruid for request", "ruid", GetRUID(r.Context()), "method", r.Method, "url", r.RequestURI) + + h.ServeHTTP(w, r) + }) +} + +func SetRequestHost(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r = r.WithContext(sctx.SetHost(r.Context(), r.Host)) + log.Info("setting request host", "ruid", GetRUID(r.Context()), "host", sctx.GetHost(r.Context())) + + h.ServeHTTP(w, r) + }) +} + +func ParseURI(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + uri, err := api.Parse(strings.TrimLeft(r.URL.Path, "/")) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + RespondError(w, r, fmt.Sprintf("invalid URI %q", r.URL.Path), http.StatusBadRequest) + return + } + if uri.Addr != "" && strings.HasPrefix(uri.Addr, "0x") { + uri.Addr = strings.TrimPrefix(uri.Addr, "0x") + + msg := fmt.Sprintf(`The requested hash seems to be prefixed with '0x'. You will be redirected to the correct URL within 5 seconds.
+ Please click here if your browser does not redirect you within 5 seconds.`, "/"+uri.String()) + w.WriteHeader(http.StatusNotFound) + w.Write([]byte(msg)) + return + } + + ctx := r.Context() + r = r.WithContext(SetURI(ctx, uri)) + log.Debug("parsed request path", "ruid", GetRUID(r.Context()), "method", r.Method, "uri.Addr", uri.Addr, "uri.Path", uri.Path, "uri.Scheme", uri.Scheme) + + h.ServeHTTP(w, r) + }) +} + +func InitLoggingResponseWriter(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + writer := newLoggingResponseWriter(w) + h.ServeHTTP(writer, r) + log.Debug("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode) + }) +} + +func InstrumentOpenTracing(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + uri := GetURI(r.Context()) + if uri == nil || r.Method == "" || (uri != nil && uri.Scheme == "") { + h.ServeHTTP(w, r) // soft fail + return + } + spanName := fmt.Sprintf("http.%s.%s", r.Method, uri.Scheme) + ctx, sp := spancontext.StartSpan(r.Context(), spanName) + defer sp.Finish() + h.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func RecoverPanic(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if err := recover(); err != nil { + log.Error("panic recovery!", "stack trace", string(debug.Stack()), "url", r.URL.String(), "headers", r.Header) + } + }() + h.ServeHTTP(w, r) + }) +} diff --git a/swarm/api/http/response.go b/swarm/api/http/response.go new file mode 100644 index 000000000000..c9fb9d2855b8 --- /dev/null +++ b/swarm/api/http/response.go @@ -0,0 +1,133 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package http + +import ( + "encoding/json" + "fmt" + "html/template" + "net/http" + "strings" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/swarm/api" +) + +var ( + htmlCounter = metrics.NewRegisteredCounter("api.http.errorpage.html.count", nil) + jsonCounter = metrics.NewRegisteredCounter("api.http.errorpage.json.count", nil) + plaintextCounter = metrics.NewRegisteredCounter("api.http.errorpage.plaintext.count", nil) +) + +type ResponseParams struct { + Msg template.HTML + Code int + Timestamp string + template *template.Template + Details template.HTML +} + +// ShowMultipleChoices is used when a user requests a resource in a manifest which results +// in ambiguous results. It returns a HTML page with clickable links of each of the entry +// in the manifest which fits the request URI ambiguity. 
+// For example, if the user requests bzz://read and that manifest contains entries +// "readme.md" and "readinglist.txt", a HTML page is returned with this two links. +// This only applies if the manifest has no default entry +func ShowMultipleChoices(w http.ResponseWriter, r *http.Request, list api.ManifestList) { + log.Debug("ShowMultipleChoices", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context())) + msg := "" + if list.Entries == nil { + RespondError(w, r, "Could not resolve", http.StatusInternalServerError) + return + } + requestUri := strings.TrimPrefix(r.RequestURI, "/") + + uri, err := api.Parse(requestUri) + if err != nil { + RespondError(w, r, "Bad Request", http.StatusBadRequest) + } + + uri.Scheme = "bzz-list" + msg += fmt.Sprintf("Disambiguation:
Your request may refer to multiple choices.
Click here if your browser does not redirect you within 5 seconds.
", "/"+uri.String()) + RespondTemplate(w, r, "error", msg, http.StatusMultipleChoices) +} + +func RespondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg string, code int) { + log.Debug("RespondTemplate", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context())) + respond(w, r, &ResponseParams{ + Code: code, + Msg: template.HTML(msg), + Timestamp: time.Now().Format(time.RFC1123), + template: TemplatesMap[templateName], + }) +} + +func RespondError(w http.ResponseWriter, r *http.Request, msg string, code int) { + log.Debug("RespondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code) + RespondTemplate(w, r, "error", msg, code) +} + +func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) { + + w.WriteHeader(params.Code) + + if params.Code >= 400 { + w.Header().Del("Cache-Control") + w.Header().Del("ETag") + } + + acceptHeader := r.Header.Get("Accept") + // this cannot be in a switch since an Accept header can have multiple values: "Accept: */*, text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8" + if strings.Contains(acceptHeader, "application/json") { + if err := respondJSON(w, r, params); err != nil { + RespondError(w, r, "Internal server error", http.StatusInternalServerError) + } + } else if strings.Contains(acceptHeader, "text/html") { + respondHTML(w, r, params) + } else { + respondPlaintext(w, r, params) //returns nice errors for curl + } +} + +func respondHTML(w http.ResponseWriter, r *http.Request, params *ResponseParams) { + htmlCounter.Inc(1) + log.Debug("respondHTML", "ruid", GetRUID(r.Context())) + err := params.template.Execute(w, params) + if err != nil { + log.Error(err.Error()) + } +} + +func respondJSON(w http.ResponseWriter, r *http.Request, params *ResponseParams) error { + jsonCounter.Inc(1) + log.Debug("respondJSON", "ruid", GetRUID(r.Context())) + w.Header().Set("Content-Type", "application/json") + return json.NewEncoder(w).Encode(params) +} + +func respondPlaintext(w http.ResponseWriter, r *http.Request, params *ResponseParams) error { + plaintextCounter.Inc(1) + log.Debug("respondPlaintext", "ruid", GetRUID(r.Context())) + w.Header().Set("Content-Type", "text/plain") + strToWrite := "Code: " + fmt.Sprintf("%d", params.Code) + "\n" + strToWrite += "Message: " + string(params.Msg) + "\n" + strToWrite += "Timestamp: " + params.Timestamp + "\n" + _, err := w.Write([]byte(strToWrite)) + return err +} diff --git a/swarm/api/http/error_test.go b/swarm/api/http/response_test.go similarity index 98% rename from swarm/api/http/error_test.go rename to swarm/api/http/response_test.go index 86eff86b2319..2e24bda6d1dd 100644 --- a/swarm/api/http/error_test.go +++ b/swarm/api/http/response_test.go @@ -29,7 +29,6 @@ import ( ) func TestError(t *testing.T) { - srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() @@ -45,7 +44,7 @@ func TestError(t *testing.T) { defer resp.Body.Close() respbody, err = ioutil.ReadAll(resp.Body) - if resp.StatusCode != 400 && !strings.Contains(string(respbody), "Invalid URI "/this_should_fail_as_no_bzz_protocol_present": unknown scheme") { + if resp.StatusCode != 404 && !strings.Contains(string(respbody), "Invalid URI "/this_should_fail_as_no_bzz_protocol_present": unknown scheme") { t.Fatalf("Response body does not match, expected: %v, to contain: %v; received code %d, expected code: %d", string(respbody), "Invalid bzz URI: unknown scheme", 400, resp.StatusCode) } diff --git a/swarm/api/http/sctx.go b/swarm/api/http/sctx.go new file mode 100644 index 
000000000000..431e11735409 --- /dev/null +++ b/swarm/api/http/sctx.go @@ -0,0 +1,38 @@ +package http + +import ( + "context" + + "github.com/ethereum/go-ethereum/swarm/api" + "github.com/ethereum/go-ethereum/swarm/sctx" +) + +type contextKey int + +const ( + uriKey contextKey = iota +) + +func GetRUID(ctx context.Context) string { + v, ok := ctx.Value(sctx.HTTPRequestIDKey).(string) + if ok { + return v + } + return "xxxxxxxx" +} + +func SetRUID(ctx context.Context, ruid string) context.Context { + return context.WithValue(ctx, sctx.HTTPRequestIDKey, ruid) +} + +func GetURI(ctx context.Context) *api.URI { + v, ok := ctx.Value(uriKey).(*api.URI) + if ok { + return v + } + return nil +} + +func SetURI(ctx context.Context, uri *api.URI) context.Context { + return context.WithValue(ctx, uriKey, uri) +} diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index ba8b2b7ba915..b5ea0c23d47a 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -20,11 +20,9 @@ A simple http server interface to Swarm package http import ( - "archive/tar" "bufio" "bytes" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -39,13 +37,12 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage/mru" - "github.com/pborman/uuid" + "github.com/rs/cors" ) @@ -67,94 +64,207 @@ var ( getFileCount = metrics.NewRegisteredCounter("api.http.get.file.count", nil) getFileNotFound = metrics.NewRegisteredCounter("api.http.get.file.notfound", nil) getFileFail = metrics.NewRegisteredCounter("api.http.get.file.fail", nil) - getFilesCount = metrics.NewRegisteredCounter("api.http.get.files.count", nil) - getFilesFail = metrics.NewRegisteredCounter("api.http.get.files.fail", nil) getListCount = metrics.NewRegisteredCounter("api.http.get.list.count", nil) getListFail = metrics.NewRegisteredCounter("api.http.get.list.fail", nil) ) -// ServerConfig is the basic configuration needed for the HTTP server and also -// includes CORS settings. 
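The sctx.go helpers above replace the old custom Request wrapper: the per-request ID and the parsed bzz URI now travel in the standard request context. A minimal, self-contained sketch of the same pattern follows; the key type and the random ID source are illustrative stand-ins, not the actual SetRequestID middleware.

package main

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"net/http"
)

type ctxKey int

const ruidKey ctxKey = iota

// setRequestID stamps a short random ID into the request context before
// calling the wrapped handler, in the spirit of SetRUID/SetRequestID.
func setRequestID(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		b := make([]byte, 4)
		rand.Read(b) // error deliberately ignored to keep the sketch short
		ctx := context.WithValue(r.Context(), ruidKey, hex.EncodeToString(b))
		h.ServeHTTP(w, r.WithContext(ctx))
	})
}

// getRUID mirrors GetRUID above: read the ID back, falling back to a placeholder.
func getRUID(ctx context.Context) string {
	if v, ok := ctx.Value(ruidKey).(string); ok {
		return v
	}
	return "xxxxxxxx"
}

func main() {
	http.Handle("/", setRequestID(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "ruid=%s\n", getRUID(r.Context()))
	})))
	http.ListenAndServe(":8080", nil)
}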
-type ServerConfig struct { - Addr string - CorsString string -} +type methodHandler map[string]http.Handler -// browser API for registering bzz url scheme handlers: -// https://developer.mozilla.org/en/docs/Web-based_protocol_handlers -// electron (chromium) api for registering bzz url scheme handlers: -// https://github.com/atom/electron/blob/master/docs/api/protocol.md +func (m methodHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + v, ok := m[r.Method] + if ok { + v.ServeHTTP(rw, r) + return + } + rw.WriteHeader(http.StatusMethodNotAllowed) +} -// starts up http server -func StartHTTPServer(api *api.API, config *ServerConfig) { +func NewServer(api *api.API, corsString string) *Server { var allowedOrigins []string - for _, domain := range strings.Split(config.CorsString, ",") { + for _, domain := range strings.Split(corsString, ",") { allowedOrigins = append(allowedOrigins, strings.TrimSpace(domain)) } c := cors.New(cors.Options{ AllowedOrigins: allowedOrigins, - AllowedMethods: []string{"POST", "GET", "DELETE", "PATCH", "PUT"}, + AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodPatch, http.MethodPut}, MaxAge: 600, AllowedHeaders: []string{"*"}, }) - hdlr := c.Handler(NewServer(api)) - go http.ListenAndServe(config.Addr, hdlr) + server := &Server{api: api} + + defaultMiddlewares := []Adapter{ + RecoverPanic, + SetRequestID, + SetRequestHost, + InitLoggingResponseWriter, + ParseURI, + InstrumentOpenTracing, + } + + mux := http.NewServeMux() + mux.Handle("/bzz:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleBzzGet), + defaultMiddlewares..., + ), + "POST": Adapt( + http.HandlerFunc(server.HandlePostFiles), + defaultMiddlewares..., + ), + "DELETE": Adapt( + http.HandlerFunc(server.HandleDelete), + defaultMiddlewares..., + ), + }) + mux.Handle("/bzz-raw:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleGet), + defaultMiddlewares..., + ), + "POST": Adapt( + http.HandlerFunc(server.HandlePostRaw), + defaultMiddlewares..., + ), + }) + mux.Handle("/bzz-immutable:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleGet), + defaultMiddlewares..., + ), + }) + mux.Handle("/bzz-hash:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleGet), + defaultMiddlewares..., + ), + }) + mux.Handle("/bzz-list:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleGetList), + defaultMiddlewares..., + ), + }) + mux.Handle("/bzz-resource:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleGetResource), + defaultMiddlewares..., + ), + "POST": Adapt( + http.HandlerFunc(server.HandlePostResource), + defaultMiddlewares..., + ), + }) + + mux.Handle("/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleRootPaths), + SetRequestID, + InitLoggingResponseWriter, + ), + }) + server.Handler = c.Handler(mux) + + return server } -func NewServer(api *api.API) *Server { - return &Server{api} +func (s *Server) ListenAndServe(addr string) error { + s.listenAddr = addr + return http.ListenAndServe(addr, s) } +// browser API for registering bzz url scheme handlers: +// https://developer.mozilla.org/en/docs/Web-based_protocol_handlers +// electron (chromium) api for registering bzz url scheme handlers: +// https://github.com/atom/electron/blob/master/docs/api/protocol.md type Server struct { - api *api.API + http.Handler + api *api.API + listenAddr string } -// Request wraps http.Request and also includes the parsed bzz URI -type Request struct { - http.Request 
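NewServer above routes each bzz scheme through a methodHandler plus a chain of adapters applied by Adapt. methodHandler is copied from the hunk above; the Adapter/Adapt definitions below are an assumption inferred from how NewServer calls them (middleware.go is not part of this hunk) and are shown only to make the wiring concrete.

package main

import (
	"fmt"
	"net/http"
)

// methodHandler dispatches on the HTTP verb and answers 405 otherwise,
// exactly as in server.go above.
type methodHandler map[string]http.Handler

func (m methodHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	if h, ok := m[r.Method]; ok {
		h.ServeHTTP(rw, r)
		return
	}
	rw.WriteHeader(http.StatusMethodNotAllowed)
}

// Adapter/Adapt are assumed to follow the common decorator form: the first
// adapter in the list ends up outermost.
type Adapter func(http.Handler) http.Handler

func Adapt(h http.Handler, adapters ...Adapter) http.Handler {
	for i := len(adapters) - 1; i >= 0; i-- {
		h = adapters[i](h)
	}
	return h
}

func logRequests(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println(r.Method, r.URL.Path)
		h.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.Handle("/demo/", methodHandler{
		http.MethodGet: Adapt(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "GET ok")
		}), logRequests),
	})
	http.ListenAndServe(":8080", mux)
}

With this wiring a POST to /demo/ gets 405 Method Not Allowed, matching what the new mux returns for unsupported verbs on the bzz routes.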
+func (s *Server) HandleBzzGet(w http.ResponseWriter, r *http.Request) { + log.Debug("handleBzzGet", "ruid", GetRUID(r.Context()), "uri", r.RequestURI) + if r.Header.Get("Accept") == "application/x-tar" { + uri := GetURI(r.Context()) + _, credentials, _ := r.BasicAuth() + reader, err := s.api.GetDirectoryTar(r.Context(), s.api.Decryptor(r.Context(), credentials), uri) + if err != nil { + if isDecryptError(err) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", uri.Address().String())) + RespondError(w, r, err.Error(), http.StatusUnauthorized) + return + } + RespondError(w, r, fmt.Sprintf("Had an error building the tarball: %v", err), http.StatusInternalServerError) + return + } + defer reader.Close() + + w.Header().Set("Content-Type", "application/x-tar") + w.WriteHeader(http.StatusOK) + io.Copy(w, reader) + return + } - uri *api.URI - ruid string // request unique id + s.HandleGetFile(w, r) +} + +func (s *Server) HandleRootPaths(w http.ResponseWriter, r *http.Request) { + switch r.RequestURI { + case "/": + RespondTemplate(w, r, "landing-page", "Swarm: Please request a valid ENS or swarm hash with the appropriate bzz scheme", 200) + return + case "/robots.txt": + w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) + fmt.Fprintf(w, "User-agent: *\nDisallow: /") + case "/favicon.ico": + w.WriteHeader(http.StatusOK) + w.Write(faviconBytes) + default: + RespondError(w, r, "Not Found", http.StatusNotFound) + } } // HandlePostRaw handles a POST request to a raw bzz-raw:/ URI, stores the request // body in swarm and returns the resulting storage address as a text/plain response -func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { - log.Debug("handle.post.raw", "ruid", r.ruid) +func (s *Server) HandlePostRaw(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + log.Debug("handle.post.raw", "ruid", ruid) postRawCount.Inc(1) toEncrypt := false - if r.uri.Addr == "encrypt" { + uri := GetURI(r.Context()) + if uri.Addr == "encrypt" { toEncrypt = true } - if r.uri.Path != "" { + if uri.Path != "" { postRawFail.Inc(1) - Respond(w, r, "raw POST request cannot contain a path", http.StatusBadRequest) + RespondError(w, r, "raw POST request cannot contain a path", http.StatusBadRequest) return } - if r.uri.Addr != "" && r.uri.Addr != "encrypt" { + if uri.Addr != "" && uri.Addr != "encrypt" { postRawFail.Inc(1) - Respond(w, r, "raw POST request addr can only be empty or \"encrypt\"", http.StatusBadRequest) + RespondError(w, r, "raw POST request addr can only be empty or \"encrypt\"", http.StatusBadRequest) return } if r.Header.Get("Content-Length") == "" { postRawFail.Inc(1) - Respond(w, r, "missing Content-Length header in request", http.StatusBadRequest) + RespondError(w, r, "missing Content-Length header in request", http.StatusBadRequest) return } - addr, _, err := s.api.Store(r.Body, r.ContentLength, toEncrypt) + + addr, _, err := s.api.Store(r.Context(), r.Body, r.ContentLength, toEncrypt) if err != nil { postRawFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } - log.Debug("stored content", "ruid", r.ruid, "key", addr) + log.Debug("stored content", "ruid", ruid, "key", addr) w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) @@ -166,47 +276,52 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { // (either a tar archive or multipart form), adds those files either to an // existing manifest or to a 
new manifest under and returns the // resulting manifest hash as a text/plain response -func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { - log.Debug("handle.post.files", "ruid", r.ruid) - +func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + log.Debug("handle.post.files", "ruid", ruid) postFilesCount.Inc(1) + contentType, params, err := mime.ParseMediaType(r.Header.Get("Content-Type")) if err != nil { postFilesFail.Inc(1) - Respond(w, r, err.Error(), http.StatusBadRequest) + RespondError(w, r, err.Error(), http.StatusBadRequest) return } toEncrypt := false - if r.uri.Addr == "encrypt" { + uri := GetURI(r.Context()) + if uri.Addr == "encrypt" { toEncrypt = true } var addr storage.Address - if r.uri.Addr != "" && r.uri.Addr != "encrypt" { - addr, err = s.api.Resolve(r.uri) + if uri.Addr != "" && uri.Addr != "encrypt" { + addr, err = s.api.Resolve(r.Context(), uri.Addr) if err != nil { postFilesFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusInternalServerError) return } - log.Debug("resolved key", "ruid", r.ruid, "key", addr) + log.Debug("resolved key", "ruid", ruid, "key", addr) } else { - addr, err = s.api.NewManifest(toEncrypt) + addr, err = s.api.NewManifest(r.Context(), toEncrypt) if err != nil { postFilesFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } - log.Debug("new manifest", "ruid", r.ruid, "key", addr) + log.Debug("new manifest", "ruid", ruid, "key", addr) } - newAddr, err := s.updateManifest(addr, func(mw *api.ManifestWriter) error { + newAddr, err := s.api.UpdateManifest(r.Context(), addr, func(mw *api.ManifestWriter) error { switch contentType { - case "application/x-tar": - return s.handleTarUpload(r, mw) - + _, err := s.handleTarUpload(r, mw) + if err != nil { + RespondError(w, r, fmt.Sprintf("error uploading tarball: %v", err), http.StatusInternalServerError) + return err + } + return nil case "multipart/form-data": return s.handleMultipartUpload(r, params["boundary"], mw) @@ -216,54 +331,33 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { }) if err != nil { postFilesFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot create manifest: %s", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("cannot create manifest: %s", err), http.StatusInternalServerError) return } - log.Debug("stored content", "ruid", r.ruid, "key", newAddr) + log.Debug("stored content", "ruid", ruid, "key", newAddr) w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) fmt.Fprint(w, newAddr) } -func (s *Server) handleTarUpload(req *Request, mw *api.ManifestWriter) error { - log.Debug("handle.tar.upload", "ruid", req.ruid) - tr := tar.NewReader(req.Body) - for { - hdr, err := tr.Next() - if err == io.EOF { - return nil - } else if err != nil { - return fmt.Errorf("error reading tar stream: %s", err) - } +func (s *Server) handleTarUpload(r *http.Request, mw *api.ManifestWriter) (storage.Address, error) { + log.Debug("handle.tar.upload", "ruid", GetRUID(r.Context())) - // only store regular files - if !hdr.FileInfo().Mode().IsRegular() { - continue - } + defaultPath := r.URL.Query().Get("defaultpath") - // add the entry under the path from the request - path := path.Join(req.uri.Path, hdr.Name) - entry := &api.ManifestEntry{ - 
Path: path, - ContentType: hdr.Xattrs["user.swarm.content-type"], - Mode: hdr.Mode, - Size: hdr.Size, - ModTime: hdr.ModTime, - } - log.Debug("adding path to new manifest", "ruid", req.ruid, "bytes", entry.Size, "path", entry.Path) - contentKey, err := mw.AddEntry(tr, entry) - if err != nil { - return fmt.Errorf("error adding manifest entry from tar stream: %s", err) - } - log.Debug("stored content", "ruid", req.ruid, "key", contentKey) + key, err := s.api.UploadTar(r.Context(), r.Body, GetURI(r.Context()).Path, defaultPath, mw) + if err != nil { + return nil, err } + return key, nil } -func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.ManifestWriter) error { - log.Debug("handle.multipart.upload", "ruid", req.ruid) - mr := multipart.NewReader(req.Body, boundary) +func (s *Server) handleMultipartUpload(r *http.Request, boundary string, mw *api.ManifestWriter) error { + ruid := GetRUID(r.Context()) + log.Debug("handle.multipart.upload", "ruid", ruid) + mr := multipart.NewReader(r.Body, boundary) for { part, err := mr.NextPart() if err == io.EOF { @@ -303,59 +397,52 @@ func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.Ma if name == "" { name = part.FormName() } - path := path.Join(req.uri.Path, name) + uri := GetURI(r.Context()) + path := path.Join(uri.Path, name) entry := &api.ManifestEntry{ Path: path, ContentType: part.Header.Get("Content-Type"), Size: size, ModTime: time.Now(), } - log.Debug("adding path to new manifest", "ruid", req.ruid, "bytes", entry.Size, "path", entry.Path) - contentKey, err := mw.AddEntry(reader, entry) + log.Debug("adding path to new manifest", "ruid", ruid, "bytes", entry.Size, "path", entry.Path) + contentKey, err := mw.AddEntry(r.Context(), reader, entry) if err != nil { return fmt.Errorf("error adding manifest entry from multipart form: %s", err) } - log.Debug("stored content", "ruid", req.ruid, "key", contentKey) + log.Debug("stored content", "ruid", ruid, "key", contentKey) } } -func (s *Server) handleDirectUpload(req *Request, mw *api.ManifestWriter) error { - log.Debug("handle.direct.upload", "ruid", req.ruid) - key, err := mw.AddEntry(req.Body, &api.ManifestEntry{ - Path: req.uri.Path, - ContentType: req.Header.Get("Content-Type"), +func (s *Server) handleDirectUpload(r *http.Request, mw *api.ManifestWriter) error { + ruid := GetRUID(r.Context()) + log.Debug("handle.direct.upload", "ruid", ruid) + key, err := mw.AddEntry(r.Context(), r.Body, &api.ManifestEntry{ + Path: GetURI(r.Context()).Path, + ContentType: r.Header.Get("Content-Type"), Mode: 0644, - Size: req.ContentLength, + Size: r.ContentLength, ModTime: time.Now(), }) if err != nil { return err } - log.Debug("stored content", "ruid", req.ruid, "key", key) + log.Debug("stored content", "ruid", ruid, "key", key) return nil } // HandleDelete handles a DELETE request to bzz://, removes // from and returns the resulting manifest hash as a // text/plain response -func (s *Server) HandleDelete(w http.ResponseWriter, r *Request) { - log.Debug("handle.delete", "ruid", r.ruid) - +func (s *Server) HandleDelete(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) + log.Debug("handle.delete", "ruid", ruid) deleteCount.Inc(1) - key, err := s.api.Resolve(r.uri) + newKey, err := s.api.Delete(r.Context(), uri.Addr, uri.Path) if err != nil { deleteFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusInternalServerError) - return - } - - newKey, err := s.updateManifest(key, 
func(mw *api.ManifestWriter) error { - log.Debug(fmt.Sprintf("removing %s from manifest %s", r.uri.Path, key.Log()), "ruid", r.ruid) - return mw.RemoveEntry(r.uri.Path) - }) - if err != nil { - deleteFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot update manifest: %s", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("could not delete from manifest: %v", err), http.StatusInternalServerError) return } @@ -396,41 +483,62 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) { // If the latter is used, a subsequent bzz:// GET call to the manifest of the resource will return // the page that the multihash is pointing to, as if it held a normal swarm content manifest // -// The resource name will be verbatim what is passed as the address part of the url. -// For example, if a POST is made to /bzz-resource:/foo.eth/raw/13 a new resource with frequency 13 -// and name "foo.eth" will be created -func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { - log.Debug("handle.post.resource", "ruid", r.ruid) +// The POST request admits a JSON structure as defined in the mru package: `mru.updateRequestJSON` +// The requests can be to a) create a resource, b) update a resource or c) both a+b: create a resource and set the initial content +func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + log.Debug("handle.post.resource", "ruid", ruid) var err error - var addr storage.Address - var name string - var outdata []byte - isRaw, frequency, err := resourcePostMode(r.uri.Path) + + // Creation and update must send mru.updateRequestJSON JSON structure + body, err := ioutil.ReadAll(r.Body) if err != nil { - Respond(w, r, err.Error(), http.StatusBadRequest) + RespondError(w, r, err.Error(), http.StatusInternalServerError) + return + } + var updateRequest mru.Request + if err := updateRequest.UnmarshalJSON(body); err != nil { // decodes request JSON + RespondError(w, r, err.Error(), http.StatusBadRequest) //TODO: send different status response depending on error return } - // new mutable resource creation will always have a frequency field larger than 0 - if frequency > 0 { - - name = r.uri.Addr + if updateRequest.IsUpdate() { + // Verify that the signature is intact and that the signer is authorized + // to update this resource + // Check this early, to avoid creating a resource and then not being able to set its first update. + if err = updateRequest.Verify(); err != nil { + RespondError(w, r, err.Error(), http.StatusForbidden) + return + } + } - // the key is the content addressed root chunk holding mutable resource metadata information - addr, err = s.api.ResourceCreate(r.Context(), name, frequency) + if updateRequest.IsNew() { + err = s.api.ResourceCreate(r.Context(), &updateRequest) if err != nil { code, err2 := s.translateResourceError(w, r, "resource creation fail", err) + RespondError(w, r, err2.Error(), code) + return + } + } - Respond(w, r, err2.Error(), code) + if updateRequest.IsUpdate() { + _, err = s.api.ResourceUpdate(r.Context(), &updateRequest.SignedResourceUpdate) + if err != nil { + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } + } + + // at this point both possible operations (create, update or both) were successful + // so in case it was a new resource, then create a manifest and send it over. 
+ if updateRequest.IsNew() { // we create a manifest so we can retrieve the resource with bzz:// later // this manifest has a special "resource type" manifest, and its hash is the key of the mutable resource - // root chunk - m, err := s.api.NewResourceManifest(addr.Hex()) + // metadata chunk (rootAddr) + m, err := s.api.NewResourceManifest(r.Context(), updateRequest.RootAddr().Hex()) if err != nil { - Respond(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) return } @@ -438,133 +546,87 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // the client can access the root chunk key directly through its Hash member // the manifest key should be set as content in the resolver of the ENS name // \TODO update manifest key automatically in ENS - outdata, err = json.Marshal(m) - if err != nil { - Respond(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError) - return - } - } else { - // to update the resource through http we need to retrieve the key for the mutable resource root chunk - // that means that we retrieve the manifest and inspect its Hash member. - manifestAddr := r.uri.Address() - if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.uri) - if err != nil { - getFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) - return - } - } else { - w.Header().Set("Cache-Control", "max-age=2147483648") - } - - // get the root chunk key from the manifest - addr, err = s.api.ResolveResourceManifest(manifestAddr) - if err != nil { - getFail.Inc(1) - Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound) - return - } - - log.Debug("handle.post.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunkkey", addr) - - name, _, err = s.api.ResourceLookup(r.Context(), addr, 0, 0, &mru.LookupParams{}) + outdata, err := json.Marshal(m) if err != nil { - Respond(w, r, err.Error(), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError) return } - } - - // Creation and update must send data aswell. This data constitutes the update data itself. 
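From a client's point of view the new write path is a single POST of a signed JSON request to /bzz-resource:/, with the manifest address coming back in the response body. A hedged HTTP-level sketch, assuming signedRequestJSON was produced with the mru helpers exactly as in the updated tests further down (NewCreateUpdateRequest, SetData, Sign, MarshalJSON); the gateway URL, package and function names are illustrative.

package swarmclient

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

// createResource POSTs a signed mru update request and returns the raw
// response body, which carries the JSON-encoded address of the resource
// manifest (the hash later used with bzz:// and bzz-resource://).
func createResource(gateway string, signedRequestJSON []byte) ([]byte, error) {
	resp, err := http.Post(gateway+"/bzz-resource:/", "application/json", bytes.NewReader(signedRequestJSON))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("create resource: %s", resp.Status)
	}
	return ioutil.ReadAll(resp.Body)
}

Subsequent updates follow the same POST, but the request JSON is normally obtained by first fetching bzz-resource://<manifest>/meta, setting the new data and re-signing, as TestBzzResource below does.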
- data, err := ioutil.ReadAll(r.Body) - if err != nil { - Respond(w, r, err.Error(), http.StatusInternalServerError) - return - } - - // Multihash will be passed as hex-encoded data, so we need to parse this to bytes - if isRaw { - _, _, _, err = s.api.ResourceUpdate(r.Context(), name, data) - if err != nil { - Respond(w, r, err.Error(), http.StatusBadRequest) - return - } - } else { - bytesdata, err := hexutil.Decode(string(data)) - if err != nil { - Respond(w, r, err.Error(), http.StatusBadRequest) - return - } - _, _, _, err = s.api.ResourceUpdateMultihash(r.Context(), name, bytesdata) - if err != nil { - Respond(w, r, err.Error(), http.StatusBadRequest) - return - } - } - - // If we have data to return, write this now - // \TODO there should always be data to return here - if len(outdata) > 0 { - w.Header().Add("Content-type", "text/plain") - w.WriteHeader(http.StatusOK) fmt.Fprint(w, string(outdata)) - return } - w.WriteHeader(http.StatusOK) + w.Header().Add("Content-type", "application/json") } // Retrieve mutable resource updates: // bzz-resource:// - get latest update // bzz-resource:/// - get latest update on period n // bzz-resource://// - get update version m of period n +// bzz-resource:///meta - get metadata and next version information // = ens name or hash -func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) { - s.handleGetResource(w, r) -} - // TODO: Enable pass maxPeriod parameter -func (s *Server) handleGetResource(w http.ResponseWriter, r *Request) { - log.Debug("handle.get.resource", "ruid", r.ruid) +func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) + log.Debug("handle.get.resource", "ruid", ruid) var err error // resolve the content key. 
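HandleGetResource below serves four GET forms: bzz-resource://<id> for the latest update, bzz-resource://<id>/<n> for the latest update in period n, bzz-resource://<id>/<n>/<m> for version m of period n, and bzz-resource://<id>/meta for the metadata and next-version information. A small sketch of the URL construction; the gateway address, id and numbers are placeholders.

package swarmclient

// resourceURLs lists the GET forms handled by HandleGetResource. id is an ENS
// name or the hex manifest address returned when the resource was created.
func resourceURLs(gateway, id string) []string {
	return []string{
		gateway + "/bzz-resource:/" + id,           // latest update
		gateway + "/bzz-resource:/" + id + "/4",    // latest update in period 4
		gateway + "/bzz-resource:/" + id + "/4/2",  // version 2 of period 4
		gateway + "/bzz-resource:/" + id + "/meta", // metadata and next-version info (JSON)
	}
}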
- manifestAddr := r.uri.Address() + manifestAddr := uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.uri) + manifestAddr, err = s.api.Resolve(r.Context(), uri.Addr) if err != nil { getFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) return } } else { w.Header().Set("Cache-Control", "max-age=2147483648") } - // get the root chunk key from the manifest - key, err := s.api.ResolveResourceManifest(manifestAddr) + // get the root chunk rootAddr from the manifest + rootAddr, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr) if err != nil { getFail.Inc(1) - Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", uri.Addr, err), http.StatusNotFound) return } - log.Debug("handle.get.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunk key", key) + log.Debug("handle.get.resource: resolved", "ruid", ruid, "manifestkey", manifestAddr, "rootchunk addr", rootAddr) - // determine if the query specifies period and version + // determine if the query specifies period and version or it is a metadata query var params []string - if len(r.uri.Path) > 0 { - params = strings.Split(r.uri.Path, "/") + if len(uri.Path) > 0 { + if uri.Path == "meta" { + unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), rootAddr) + if err != nil { + getFail.Inc(1) + RespondError(w, r, fmt.Sprintf("cannot retrieve resource metadata for rootAddr=%s: %s", rootAddr.Hex(), err), http.StatusNotFound) + return + } + rawResponse, err := unsignedUpdateRequest.MarshalJSON() + if err != nil { + RespondError(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError) + return + } + w.Header().Add("Content-type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, string(rawResponse)) + return + + } + + params = strings.Split(uri.Path, "/") + } var name string - var period uint64 - var version uint64 var data []byte now := time.Now() switch len(params) { case 0: // latest only - name, data, err = s.api.ResourceLookup(r.Context(), key, 0, 0, nil) + name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatest(rootAddr)) case 2: // specific period and version + var version uint64 + var period uint64 version, err = strconv.ParseUint(params[1], 10, 32) if err != nil { break @@ -573,13 +635,14 @@ func (s *Server) handleGetResource(w http.ResponseWriter, r *Request) { if err != nil { break } - name, data, err = s.api.ResourceLookup(r.Context(), key, uint32(period), uint32(version), nil) + name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupVersion(rootAddr, uint32(period), uint32(version))) case 1: // last version of specific period + var period uint64 period, err = strconv.ParseUint(params[0], 10, 32) if err != nil { break } - name, data, err = s.api.ResourceLookup(r.Context(), key, uint32(period), uint32(version), nil) + name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatestVersionInPeriod(rootAddr, uint32(period))) default: // bogus err = mru.NewError(storage.ErrInvalidValue, "invalid mutable resource request") } @@ -587,17 +650,17 @@ func (s *Server) handleGetResource(w http.ResponseWriter, r *Request) { // any error from the switch statement will end up here if err != nil { code, err2 := 
s.translateResourceError(w, r, "mutable resource lookup fail", err) - Respond(w, r, err2.Error(), code) + RespondError(w, r, err2.Error(), code) return } // All ok, serve the retrieved update - log.Debug("Found update", "name", name, "ruid", r.ruid) + log.Debug("Found update", "name", name, "ruid", ruid) w.Header().Set("Content-Type", "application/octet-stream") - http.ServeContent(w, &r.Request, "", now, bytes.NewReader(data)) + http.ServeContent(w, r, "", now, bytes.NewReader(data)) } -func (s *Server) translateResourceError(w http.ResponseWriter, r *Request, supErr string, err error) (int, error) { +func (s *Server) translateResourceError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) { code := 0 defaultErr := fmt.Errorf("%s: %v", supErr, err) rsrcErr, ok := err.(*mru.Error) @@ -623,86 +686,48 @@ func (s *Server) translateResourceError(w http.ResponseWriter, r *Request, supEr // given storage key // - bzz-hash:// and responds with the hash of the content stored // at the given storage key as a text/plain response -func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { - log.Debug("handle.get", "ruid", r.ruid, "uri", r.uri) +func (s *Server) HandleGet(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) + log.Debug("handle.get", "ruid", ruid, "uri", uri) getCount.Inc(1) - var err error - addr := r.uri.Address() - if addr == nil { - addr, err = s.api.Resolve(r.uri) - if err != nil { - getFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) - return - } - } else { - w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz:///path, so we are sure it is immutable. + _, pass, _ := r.BasicAuth() + + addr, err := s.api.ResolveURI(r.Context(), uri, pass) + if err != nil { + getFail.Inc(1) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) + return } + w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz:///path, so we are sure it is immutable. - log.Debug("handle.get: resolved", "ruid", r.ruid, "key", addr) + log.Debug("handle.get: resolved", "ruid", ruid, "key", addr) // if path is set, interpret as a manifest and return the // raw entry at the given path - if r.uri.Path != "" { - walker, err := s.api.NewManifestWalker(addr, nil) - if err != nil { - getFail.Inc(1) - Respond(w, r, fmt.Sprintf("%s is not a manifest", addr), http.StatusBadRequest) - return - } - var entry *api.ManifestEntry - walker.Walk(func(e *api.ManifestEntry) error { - // if the entry matches the path, set entry and stop - // the walk - if e.Path == r.uri.Path { - entry = e - // return an error to cancel the walk - return errors.New("found") - } - // ignore non-manifest files - if e.ContentType != api.ManifestType { - return nil - } - - // if the manifest's path is a prefix of the - // requested path, recurse into it by returning - // nil and continuing the walk - if strings.HasPrefix(r.uri.Path, e.Path) { - return nil - } - - return api.ErrSkipManifest - }) - if entry == nil { - getFail.Inc(1) - Respond(w, r, fmt.Sprintf("manifest entry could not be loaded"), http.StatusNotFound) - return - } - addr = storage.Address(common.Hex2Bytes(entry.Hash)) - } etag := common.Bytes2Hex(addr) noneMatchEtag := r.Header.Get("If-None-Match") w.Header().Set("ETag", fmt.Sprintf("%q", etag)) // set etag to manifest key or raw entry key. 
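HandleGet sets the ETag to the hex storage address and, as the next hunk shows, answers 304 Not Modified when If-None-Match carries the same address; note that the handler decodes the raw header value with common.Hex2Bytes, so a client sends the bare hex rather than the quoted ETag. A hedged client-side sketch, with the gateway address and package name as placeholders:

package swarmclient

import (
	"io/ioutil"
	"net/http"
)

// fetchIfChanged does a conditional GET against bzz-raw://. It returns
// modified=false when the gateway answers 304 and the cached copy is current.
func fetchIfChanged(gateway, addrHex string) (modified bool, body []byte, err error) {
	req, err := http.NewRequest(http.MethodGet, gateway+"/bzz-raw:/"+addrHex, nil)
	if err != nil {
		return false, nil, err
	}
	// Bare hex here: the handler compares common.Hex2Bytes(If-None-Match)
	// against the resolved address.
	req.Header.Set("If-None-Match", addrHex)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false, nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotModified {
		return false, nil, nil
	}
	body, err = ioutil.ReadAll(resp.Body)
	return true, body, err
}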
if noneMatchEtag != "" { if bytes.Equal(storage.Address(common.Hex2Bytes(noneMatchEtag)), addr) { - Respond(w, r, "Not Modified", http.StatusNotModified) + w.WriteHeader(http.StatusNotModified) return } } // check the root chunk exists by retrieving the file's size - reader, isEncrypted := s.api.Retrieve(addr) - if _, err := reader.Size(nil); err != nil { + reader, isEncrypted := s.api.Retrieve(r.Context(), addr) + if _, err := reader.Size(r.Context(), nil); err != nil { getFail.Inc(1) - Respond(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound) return } w.Header().Set("X-Decrypted", fmt.Sprintf("%v", isEncrypted)) switch { - case r.uri.Raw(): + case uri.Raw(): // allow the request to overwrite the content type using a query // parameter contentType := "application/octet-stream" @@ -710,115 +735,47 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { contentType = typ } w.Header().Set("Content-Type", contentType) - http.ServeContent(w, &r.Request, "", time.Now(), reader) - case r.uri.Hash(): + http.ServeContent(w, r, "", time.Now(), reader) + case uri.Hash(): w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) fmt.Fprint(w, addr) } } -// HandleGetFiles handles a GET request to bzz:/ with an Accept -// header of "application/x-tar" and returns a tar stream of all files -// contained in the manifest -func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { - log.Debug("handle.get.files", "ruid", r.ruid, "uri", r.uri) - getFilesCount.Inc(1) - if r.uri.Path != "" { - getFilesFail.Inc(1) - Respond(w, r, "files request cannot contain a path", http.StatusBadRequest) - return - } - - addr, err := s.api.Resolve(r.uri) - if err != nil { - getFilesFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) - return - } - log.Debug("handle.get.files: resolved", "ruid", r.ruid, "key", addr) - - walker, err := s.api.NewManifestWalker(addr, nil) - if err != nil { - getFilesFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) - return - } - - tw := tar.NewWriter(w) - defer tw.Close() - w.Header().Set("Content-Type", "application/x-tar") - w.WriteHeader(http.StatusOK) - - err = walker.Walk(func(entry *api.ManifestEntry) error { - // ignore manifests (walk will recurse into them) - if entry.ContentType == api.ManifestType { - return nil - } - - // retrieve the entry's key and size - reader, isEncrypted := s.api.Retrieve(storage.Address(common.Hex2Bytes(entry.Hash))) - size, err := reader.Size(nil) - if err != nil { - return err - } - w.Header().Set("X-Decrypted", fmt.Sprintf("%v", isEncrypted)) - - // write a tar header for the entry - hdr := &tar.Header{ - Name: entry.Path, - Mode: entry.Mode, - Size: size, - ModTime: entry.ModTime, - Xattrs: map[string]string{ - "user.swarm.content-type": entry.ContentType, - }, - } - if err := tw.WriteHeader(hdr); err != nil { - return err - } - - // copy the file into the tar stream - n, err := io.Copy(tw, io.LimitReader(reader, hdr.Size)) - if err != nil { - return err - } else if n != size { - return fmt.Errorf("error writing %s: expected %d bytes but sent %d", entry.Path, size, n) - } - - return nil - }) - if err != nil { - getFilesFail.Inc(1) - log.Error(fmt.Sprintf("error generating tar stream: %s", err)) - } -} - // HandleGetList handles a GET request to bzz-list:// and returns // a list of all files contained in under grouped 
into // common prefixes using "/" as a delimiter -func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { - log.Debug("handle.get.list", "ruid", r.ruid, "uri", r.uri) +func (s *Server) HandleGetList(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) + _, credentials, _ := r.BasicAuth() + log.Debug("handle.get.list", "ruid", ruid, "uri", uri) getListCount.Inc(1) + // ensure the root path has a trailing slash so that relative URLs work - if r.uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { - http.Redirect(w, &r.Request, r.URL.Path+"/", http.StatusMovedPermanently) + if uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { + http.Redirect(w, r, r.URL.Path+"/", http.StatusMovedPermanently) return } - addr, err := s.api.Resolve(r.uri) + addr, err := s.api.Resolve(r.Context(), uri.Addr) if err != nil { getListFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) return } - log.Debug("handle.get.list: resolved", "ruid", r.ruid, "key", addr) - - list, err := s.getManifestList(addr, r.uri.Path) + log.Debug("handle.get.list: resolved", "ruid", ruid, "key", addr) + list, err := s.api.GetManifestList(r.Context(), s.api.Decryptor(r.Context(), credentials), addr, uri.Path) if err != nil { getListFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) + if isDecryptError(err) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", addr.String())) + RespondError(w, r, err.Error(), http.StatusUnauthorized) + return + } + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } @@ -826,11 +783,11 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { // HTML index with relative URLs if strings.Contains(r.Header.Get("Accept"), "text/html") { w.Header().Set("Content-Type", "text/html") - err := htmlListTemplate.Execute(w, &htmlListData{ + err := TemplatesMap["bzz-list"].Execute(w, &htmlListData{ URI: &api.URI{ Scheme: "bzz", - Addr: r.uri.Addr, - Path: r.uri.Path, + Addr: uri.Addr, + Path: uri.Path, }, List: &list, }) @@ -845,108 +802,62 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { json.NewEncoder(w).Encode(&list) } -func (s *Server) getManifestList(addr storage.Address, prefix string) (list api.ManifestList, err error) { - walker, err := s.api.NewManifestWalker(addr, nil) - if err != nil { - return - } - - err = walker.Walk(func(entry *api.ManifestEntry) error { - // handle non-manifest files - if entry.ContentType != api.ManifestType { - // ignore the file if it doesn't have the specified prefix - if !strings.HasPrefix(entry.Path, prefix) { - return nil - } - - // if the path after the prefix contains a slash, add a - // common prefix to the list, otherwise add the entry - suffix := strings.TrimPrefix(entry.Path, prefix) - if index := strings.Index(suffix, "/"); index > -1 { - list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1]) - return nil - } - if entry.Path == "" { - entry.Path = "/" - } - list.Entries = append(list.Entries, entry) - return nil - } - - // if the manifest's path is a prefix of the specified prefix - // then just recurse into the manifest by returning nil and - // continuing the walk - if strings.HasPrefix(prefix, entry.Path) { - return nil - } - - // if the manifest's path has the specified prefix, then if the - // path after the prefix contains a slash, add a 
common prefix - // to the list and skip the manifest, otherwise recurse into - // the manifest by returning nil and continuing the walk - if strings.HasPrefix(entry.Path, prefix) { - suffix := strings.TrimPrefix(entry.Path, prefix) - if index := strings.Index(suffix, "/"); index > -1 { - list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1]) - return api.ErrSkipManifest - } - return nil - } - - // the manifest neither has the prefix or needs recursing in to - // so just skip it - return api.ErrSkipManifest - }) - - return list, nil -} - // HandleGetFile handles a GET request to bzz:/// and responds // with the content of the file at from the given -func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { - log.Debug("handle.get.file", "ruid", r.ruid) +func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) + _, credentials, _ := r.BasicAuth() + log.Debug("handle.get.file", "ruid", ruid, "uri", r.RequestURI) getFileCount.Inc(1) + // ensure the root path has a trailing slash so that relative URLs work - if r.uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { - http.Redirect(w, &r.Request, r.URL.Path+"/", http.StatusMovedPermanently) + if uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { + http.Redirect(w, r, r.URL.Path+"/", http.StatusMovedPermanently) return } var err error - manifestAddr := r.uri.Address() + manifestAddr := uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.uri) + manifestAddr, err = s.api.ResolveURI(r.Context(), uri, credentials) if err != nil { getFileFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) return } } else { w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz:///path, so we are sure it is immutable. } - log.Debug("handle.get.file: resolved", "ruid", r.ruid, "key", manifestAddr) + log.Debug("handle.get.file: resolved", "ruid", ruid, "key", manifestAddr) - reader, contentType, status, contentKey, err := s.api.Get(manifestAddr, r.uri.Path) + reader, contentType, status, contentKey, err := s.api.Get(r.Context(), s.api.Decryptor(r.Context(), credentials), manifestAddr, uri.Path) etag := common.Bytes2Hex(contentKey) noneMatchEtag := r.Header.Get("If-None-Match") w.Header().Set("ETag", fmt.Sprintf("%q", etag)) // set etag to actual content key. if noneMatchEtag != "" { if bytes.Equal(storage.Address(common.Hex2Bytes(noneMatchEtag)), contentKey) { - Respond(w, r, "Not Modified", http.StatusNotModified) + w.WriteHeader(http.StatusNotModified) return } } if err != nil { + if isDecryptError(err) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", manifestAddr)) + RespondError(w, r, err.Error(), http.StatusUnauthorized) + return + } + switch status { case http.StatusNotFound: getFileNotFound.Inc(1) - Respond(w, r, err.Error(), http.StatusNotFound) + RespondError(w, r, err.Error(), http.StatusNotFound) default: getFileFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) + RespondError(w, r, err.Error(), http.StatusInternalServerError) } return } @@ -954,29 +865,33 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { //the request results in ambiguous files //e.g. 
/read with readme.md and readinglist.txt available in manifest if status == http.StatusMultipleChoices { - list, err := s.getManifestList(manifestAddr, r.uri.Path) - + list, err := s.api.GetManifestList(r.Context(), s.api.Decryptor(r.Context(), credentials), manifestAddr, uri.Path) if err != nil { getFileFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) + if isDecryptError(err) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", manifestAddr)) + RespondError(w, r, err.Error(), http.StatusUnauthorized) + return + } + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } - log.Debug(fmt.Sprintf("Multiple choices! --> %v", list), "ruid", r.ruid) + log.Debug(fmt.Sprintf("Multiple choices! --> %v", list), "ruid", ruid) //show a nice page links to available entries ShowMultipleChoices(w, r, list) return } // check the root chunk exists by retrieving the file's size - if _, err := reader.Size(nil); err != nil { + if _, err := reader.Size(r.Context(), nil); err != nil { getFileNotFound.Inc(1) - Respond(w, r, fmt.Sprintf("file not found %s: %s", r.uri, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("file not found %s: %s", uri, err), http.StatusNotFound) return } w.Header().Set("Content-Type", contentType) - http.ServeContent(w, &r.Request, "", time.Now(), newBufferedReadSeeker(reader, getFileBufferSize)) + http.ServeContent(w, r, "", time.Now(), newBufferedReadSeeker(reader, getFileBufferSize)) } // The size of buffer used for bufio.Reader on LazyChunkReader passed to @@ -1010,123 +925,6 @@ func (b bufferedReadSeeker) Seek(offset int64, whence int) (int64, error) { return b.s.Seek(offset, whence) } -func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).UpdateSince(time.Now()) - req := &Request{Request: *r, ruid: uuid.New()[:8]} - metrics.GetOrRegisterCounter(fmt.Sprintf("http.request.%s", r.Method), nil).Inc(1) - log.Info("serving request", "ruid", req.ruid, "method", r.Method, "url", r.RequestURI) - - // wrapping the ResponseWriter, so that we get the response code set by http.ServeContent - w := newLoggingResponseWriter(rw) - - if r.RequestURI == "/" && strings.Contains(r.Header.Get("Accept"), "text/html") { - - err := landingPageTemplate.Execute(w, nil) - if err != nil { - log.Error(fmt.Sprintf("error rendering landing page: %s", err)) - } - return - } - - if r.URL.Path == "/robots.txt" { - w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) - fmt.Fprintf(w, "User-agent: *\nDisallow: /") - return - } - - if r.RequestURI == "/" && strings.Contains(r.Header.Get("Accept"), "application/json") { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode("Welcome to Swarm!") - return - } - - uri, err := api.Parse(strings.TrimLeft(r.URL.Path, "/")) - if err != nil { - Respond(w, req, fmt.Sprintf("invalid URI %q", r.URL.Path), http.StatusBadRequest) - return - } - - req.uri = uri - - log.Debug("parsed request path", "ruid", req.ruid, "method", req.Method, "uri.Addr", req.uri.Addr, "uri.Path", req.uri.Path, "uri.Scheme", req.uri.Scheme) - - switch r.Method { - case "POST": - if uri.Raw() { - log.Debug("handlePostRaw") - s.HandlePostRaw(w, req) - } else if uri.Resource() { - log.Debug("handlePostResource") - s.HandlePostResource(w, req) - } else if uri.Immutable() || uri.List() || uri.Hash() { - log.Debug("POST not allowed on immutable, list or hash") - 
Respond(w, req, fmt.Sprintf("POST method on scheme %s not allowed", uri.Scheme), http.StatusMethodNotAllowed) - } else { - log.Debug("handlePostFiles") - s.HandlePostFiles(w, req) - } - - case "PUT": - Respond(w, req, fmt.Sprintf("PUT method to %s not allowed", uri), http.StatusBadRequest) - return - - case "DELETE": - if uri.Raw() { - Respond(w, req, fmt.Sprintf("DELETE method to %s not allowed", uri), http.StatusBadRequest) - return - } - s.HandleDelete(w, req) - - case "GET": - - if uri.Resource() { - s.HandleGetResource(w, req) - return - } - - if uri.Raw() || uri.Hash() { - s.HandleGet(w, req) - return - } - - if uri.List() { - s.HandleGetList(w, req) - return - } - - if r.Header.Get("Accept") == "application/x-tar" { - s.HandleGetFiles(w, req) - return - } - - s.HandleGetFile(w, req) - - default: - Respond(w, req, fmt.Sprintf("%s method is not supported", r.Method), http.StatusMethodNotAllowed) - } - - log.Info("served response", "ruid", req.ruid, "code", w.statusCode) -} - -func (s *Server) updateManifest(addr storage.Address, update func(mw *api.ManifestWriter) error) (storage.Address, error) { - mw, err := s.api.NewManifestWriter(addr, nil) - if err != nil { - return nil, err - } - - if err := update(mw); err != nil { - return nil, err - } - - addr, err = mw.Store() - if err != nil { - return nil, err - } - log.Debug(fmt.Sprintf("generated manifest %s", addr)) - return addr, nil -} - type loggingResponseWriter struct { http.ResponseWriter statusCode int @@ -1140,3 +938,7 @@ func (lrw *loggingResponseWriter) WriteHeader(code int) { lrw.statusCode = code lrw.ResponseWriter.WriteHeader(code) } + +func isDecryptError(err error) bool { + return strings.Contains(err.Error(), api.ErrDecrypt.Error()) +} diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 9fb21f7a357a..7934e37eba31 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -17,25 +17,32 @@ package http import ( + "archive/tar" "bytes" + "context" "crypto/rand" "encoding/json" "errors" "flag" "fmt" + "io" "io/ioutil" + "mime/multipart" "net/http" "os" + "strconv" "strings" "testing" + "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/swarm/api" swarm "github.com/ethereum/go-ethereum/swarm/api/client" "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/mru" "github.com/ethereum/go-ethereum/swarm/testutil" ) @@ -87,7 +94,15 @@ func TestResourcePostMode(t *testing.T) { } func serverFunc(api *api.API) testutil.TestServer { - return NewServer(api) + return NewServer(api, "") +} + +func newTestSigner() (*mru.GenericSigner, error) { + privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + if err != nil { + return nil, err + } + return mru.NewGenericSigner(privKey), nil } // test the transparent resolving of multihash resource types with bzz:// scheme @@ -97,6 +112,8 @@ func serverFunc(api *api.API) testutil.TestServer { // and raw retrieve of that hash should return the data func TestBzzResourceMultihash(t *testing.T) { + signer, _ := newTestSigner() + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() @@ -119,15 +136,35 @@ func TestBzzResourceMultihash(t *testing.T) { s := 
common.FromHex(string(b)) mh := multihash.ToMultihash(s) - mhHex := hexutil.Encode(mh) log.Info("added data", "manifest", string(b), "data", common.ToHex(mh)) // our mutable resource "name" keybytes := "foo.eth" + updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{ + Name: keybytes, + Frequency: 13, + StartTime: srv.GetCurrentTime(), + Owner: signer.Address(), + }) + if err != nil { + t.Fatal(err) + } + updateRequest.SetData(mh, true) + + if err := updateRequest.Sign(signer); err != nil { + t.Fatal(err) + } + log.Info("added data", "manifest", string(b), "data", common.ToHex(mh)) + + body, err := updateRequest.MarshalJSON() + if err != nil { + t.Fatal(err) + } + // create the multihash update - url = fmt.Sprintf("%s/bzz-resource:/%s/13", srv.URL, keybytes) - resp, err = http.Post(url, "application/octet-stream", bytes.NewReader([]byte(mhHex))) + url = fmt.Sprintf("%s/bzz-resource:/", srv.URL) + resp, err = http.Post(url, "application/json", bytes.NewReader(body)) if err != nil { t.Fatal(err) } @@ -145,9 +182,9 @@ func TestBzzResourceMultihash(t *testing.T) { t.Fatalf("data %s could not be unmarshaled: %v", b, err) } - correctManifestAddrHex := "d689648fb9e00ddc7ebcf474112d5881c5bf7dbc6e394681b1d224b11b59b5e0" + correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e" if rsrcResp.Hex() != correctManifestAddrHex { - t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp) + t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) } // get bzz manifest transparent resource resolve @@ -172,6 +209,8 @@ func TestBzzResourceMultihash(t *testing.T) { // Test resource updates using the raw update methods func TestBzzResource(t *testing.T) { srv := testutil.NewTestSwarmServer(t, serverFunc) + signer, _ := newTestSigner() + defer srv.Close() // our mutable resource "name" @@ -184,9 +223,29 @@ func TestBzzResource(t *testing.T) { t.Fatal(err) } + updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{ + Name: keybytes, + Frequency: 13, + StartTime: srv.GetCurrentTime(), + Owner: signer.Address(), + }) + if err != nil { + t.Fatal(err) + } + updateRequest.SetData(databytes, false) + + if err := updateRequest.Sign(signer); err != nil { + t.Fatal(err) + } + + body, err := updateRequest.MarshalJSON() + if err != nil { + t.Fatal(err) + } + // creates resource and sets update 1 - url := fmt.Sprintf("%s/bzz-resource:/%s/raw/13", srv.URL, []byte(keybytes)) - resp, err := http.Post(url, "application/octet-stream", bytes.NewReader(databytes)) + url := fmt.Sprintf("%s/bzz-resource:/", srv.URL) + resp, err := http.Post(url, "application/json", bytes.NewReader(body)) if err != nil { t.Fatal(err) } @@ -204,7 +263,7 @@ func TestBzzResource(t *testing.T) { t.Fatalf("data %s could not be unmarshaled: %v", b, err) } - correctManifestAddrHex := "d689648fb9e00ddc7ebcf474112d5881c5bf7dbc6e394681b1d224b11b59b5e0" + correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e" if rsrcResp.Hex() != correctManifestAddrHex { t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) } @@ -231,8 +290,7 @@ func TestBzzResource(t *testing.T) { if len(manifest.Entries) != 1 { t.Fatalf("Manifest has %d entries", len(manifest.Entries)) } - - correctRootKeyHex := "f667277e004e8486c7a3631fd226802430e84e9a81b6085d31f512a591ae0065" + correctRootKeyHex := 
"68f7ba07ac8867a4c841a4d4320e3cdc549df23702dc7285fcb6acf65df48562" if manifest.Entries[0].Hash != correctRootKeyHex { t.Fatalf("Expected manifest path '%s', got '%s'", correctRootKeyHex, manifest.Entries[0].Hash) } @@ -258,6 +316,11 @@ func TestBzzResource(t *testing.T) { if err != nil { t.Fatal(err) } + + if resp.StatusCode != http.StatusNotFound { + t.Fatalf("Expected get non-existent resource to fail with StatusNotFound (404), got %d", resp.StatusCode) + } + resp.Body.Close() // get latest update (1.1) through resource directly @@ -281,9 +344,36 @@ func TestBzzResource(t *testing.T) { // update 2 log.Info("update 2") - url = fmt.Sprintf("%s/bzz-resource:/%s/raw", srv.URL, correctManifestAddrHex) + + // 1.- get metadata about this resource + url = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex) + resp, err = http.Get(url + "meta") + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("Get resource metadata returned %s", resp.Status) + } + b, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + updateRequest = &mru.Request{} + if err = updateRequest.UnmarshalJSON(b); err != nil { + t.Fatalf("Error decoding resource metadata: %s", err) + } data := []byte("foo") - resp, err = http.Post(url, "application/octet-stream", bytes.NewReader(data)) + updateRequest.SetData(data, false) + if err = updateRequest.Sign(signer); err != nil { + t.Fatal(err) + } + body, err = updateRequest.MarshalJSON() + if err != nil { + t.Fatal(err) + } + + resp, err = http.Post(url, "application/json", bytes.NewReader(body)) if err != nil { t.Fatal(err) } @@ -382,15 +472,19 @@ func testBzzGetPath(encrypted bool, t *testing.T) { for i, mf := range testmanifest { reader[i] = bytes.NewReader([]byte(mf)) - var wait func() - addr[i], wait, err = srv.FileStore.Store(reader[i], int64(len(mf)), encrypted) + var wait func(context.Context) error + ctx := context.TODO() + addr[i], wait, err = srv.FileStore.Store(ctx, reader[i], int64(len(mf)), encrypted) for j := i + 1; j < len(testmanifest); j++ { testmanifest[j] = strings.Replace(testmanifest[j], fmt.Sprintf("", i), addr[i].Hex(), -1) } if err != nil { t.Fatal(err) } - wait() + err = wait(ctx) + if err != nil { + t.Fatal(err) + } } rootRef := addr[2].Hex() @@ -464,24 +558,35 @@ func testBzzGetPath(encrypted bool, t *testing.T) { ref := addr[2].Hex() for _, c := range []struct { - path string - json string - html string + path string + json string + pageFragments []string }{ { path: "/", json: `{"common_prefixes":["a/"]}`, - html: fmt.Sprintf("\n\n\n \n \n\t\t\n\tSwarm index of bzz:/%s/\n\n\n\n

... rest of the removed HTML index page for bzz:/%s/ elided ...", ref, ref),
+			pageFragments: []string{
+				fmt.Sprintf("Swarm index of bzz:/%s/", ref),
+				`a/`,
+			},
 		},
 		{
 			path: "/a/",
 			json: `{"common_prefixes":["a/b/"],"entries":[{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/a","mod_time":"0001-01-01T00:00:00Z"}]}`,
-			html: fmt.Sprintf("... full HTML index page for bzz:/%s/a/ elided ...", ref, ref),
+			pageFragments: []string{
+				fmt.Sprintf("Swarm index of bzz:/%s/a/", ref),
+				`b/`,
+				fmt.Sprintf(`a`, ref),
+			},
 		},
 		{
 			path: "/a/b/",
 			json: `{"entries":[{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/b/b","mod_time":"0001-01-01T00:00:00Z"},{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/b/c","mod_time":"0001-01-01T00:00:00Z"}]}`,
-			html: fmt.Sprintf("... full HTML index page for bzz:/%s/a/b/ elided
\n\n", ref, ref), + pageFragments: []string{ + fmt.Sprintf("Swarm index of bzz:/%s/a/b/", ref), + fmt.Sprintf(`b`, ref), + fmt.Sprintf(`c`, ref), + }, }, { path: "/x", @@ -531,21 +636,25 @@ func testBzzGetPath(encrypted bool, t *testing.T) { t.Fatalf("HTTP request: %v", err) } defer resp.Body.Close() - respbody, err := ioutil.ReadAll(resp.Body) + b, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("Read response body: %v", err) } - if string(respbody) != c.html { - isexpectedfailrequest := false + body := string(b) - for _, r := range expectedfailrequests { - if k[:] == r { - isexpectedfailrequest = true + for _, f := range c.pageFragments { + if !strings.Contains(body, f) { + isexpectedfailrequest := false + + for _, r := range expectedfailrequests { + if k[:] == r { + isexpectedfailrequest = true + } + } + if !isexpectedfailrequest { + t.Errorf("Response list body %q does not contain %q: body %q", k, f, body) } - } - if !isexpectedfailrequest { - t.Errorf("Response list body %q does not match, expected: %q, got %q", k, c.html, string(respbody)) } } }) @@ -560,11 +669,11 @@ func testBzzGetPath(encrypted bool, t *testing.T) { } nonhashresponses := []string{ - "cannot resolve name: no DNS to resolve name: "name"", - "cannot resolve nonhash: immutable address not a content hash: "nonhash"", - "cannot resolve nonhash: no DNS to resolve name: "nonhash"", - "cannot resolve nonhash: no DNS to resolve name: "nonhash"", - "cannot resolve nonhash: no DNS to resolve name: "nonhash"", + `cannot resolve name: no DNS to resolve name: "name"`, + `cannot resolve nonhash: immutable address not a content hash: "nonhash"`, + `cannot resolve nonhash: no DNS to resolve name: "nonhash"`, + `cannot resolve nonhash: no DNS to resolve name: "nonhash"`, + `cannot resolve nonhash: no DNS to resolve name: "nonhash"`, } for i, url := range nonhashtests { @@ -587,6 +696,126 @@ func testBzzGetPath(encrypted bool, t *testing.T) { } } +func TestBzzTar(t *testing.T) { + testBzzTar(false, t) + testBzzTar(true, t) +} + +func testBzzTar(encrypted bool, t *testing.T) { + srv := testutil.NewTestSwarmServer(t, serverFunc) + defer srv.Close() + fileNames := []string{"tmp1.txt", "tmp2.lock", "tmp3.rtf"} + fileContents := []string{"tmp1textfilevalue", "tmp2lockfilelocked", "tmp3isjustaplaintextfile"} + + buf := &bytes.Buffer{} + tw := tar.NewWriter(buf) + defer tw.Close() + + for i, v := range fileNames { + size := int64(len(fileContents[i])) + hdr := &tar.Header{ + Name: v, + Mode: 0644, + Size: size, + ModTime: time.Now(), + Xattrs: map[string]string{ + "user.swarm.content-type": "text/plain", + }, + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + + // copy the file into the tar stream + n, err := io.Copy(tw, bytes.NewBufferString(fileContents[i])) + if err != nil { + t.Fatal(err) + } else if n != size { + t.Fatal("size mismatch") + } + } + + //post tar stream + url := srv.URL + "/bzz:/" + if encrypted { + url = url + "encrypt" + } + req, err := http.NewRequest("POST", url, buf) + if err != nil { + t.Fatal(err) + } + req.Header.Add("Content-Type", "application/x-tar") + client := &http.Client{} + resp2, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + if resp2.StatusCode != http.StatusOK { + t.Fatalf("err %s", resp2.Status) + } + swarmHash, err := ioutil.ReadAll(resp2.Body) + resp2.Body.Close() + if err != nil { + t.Fatal(err) + } + + // now do a GET to get a tarball back + req, err = http.NewRequest("GET", fmt.Sprintf(srv.URL+"/bzz:/%s", string(swarmHash)), nil) + if err != nil { + 
+		t.Fatal(err)
+	}
+	req.Header.Add("Accept", "application/x-tar")
+	resp2, err = client.Do(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer resp2.Body.Close()
+
+	file, err := ioutil.TempFile("", "swarm-downloaded-tarball")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(file.Name())
+	_, err = io.Copy(file, resp2.Body)
+	if err != nil {
+		t.Fatalf("error getting tarball: %v", err)
+	}
+	file.Sync()
+	file.Close()
+
+	tarFileHandle, err := os.Open(file.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+	tr := tar.NewReader(tarFileHandle)
+
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			t.Fatalf("error reading tar stream: %s", err)
+		}
+		bb := make([]byte, hdr.Size)
+		_, err = tr.Read(bb)
+		if err != nil && err != io.EOF {
+			t.Fatal(err)
+		}
+		passed := false
+		for i, v := range fileNames {
+			if v == hdr.Name {
+				if string(bb) == fileContents[i] {
+					passed = true
+					break
+				}
+			}
+		}
+		if !passed {
+			t.Fatalf("file %s did not pass content assertion", hdr.Name)
+		}
+	}
+}
+
 // TestBzzRootRedirect tests that getting the root path of a manifest without
 // a trailing slash gets redirected to include the trailing slash so that
 // relative URLs work as expected.
@@ -674,8 +903,253 @@ func TestMethodsNotAllowed(t *testing.T) {
 	} {
 		res, _ := http.Post(c.url, "text/plain", bytes.NewReader([]byte(databytes)))
 		if res.StatusCode != c.code {
-			t.Fatal("should have failed")
+			t.Fatalf("should have failed. requested url: %s, expected code %d, got %d", c.url, c.code, res.StatusCode)
 		}
 	}
 }
+
+func httpDo(httpMethod string, url string, reqBody io.Reader, headers map[string]string, verbose bool, t *testing.T) (*http.Response, string) {
+	// Build the Request
+	req, err := http.NewRequest(httpMethod, url, reqBody)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for key, value := range headers {
+		req.Header.Set(key, value)
+	}
+	if verbose {
+		t.Log(req.Method, req.URL, req.Header, req.Body)
+	}
+
+	// Send Request out
+	httpClient := &http.Client{}
+	res, err := httpClient.Do(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read the HTTP Body
+	buffer, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer res.Body.Close()
+	body := string(buffer)
+
+	return res, body
+}
+
+func TestGet(t *testing.T) {
+	srv := testutil.NewTestSwarmServer(t, serverFunc)
+	defer srv.Close()
+
+	for _, testCase := range []struct {
+		uri                string
+		method             string
+		headers            map[string]string
+		expectedStatusCode int
+		assertResponseBody string
+		verbose            bool
+	}{
+		{
+			uri: fmt.Sprintf("%s/", srv.URL),
+			method: "GET",
+			headers: map[string]string{"Accept": "text/html"},
+			expectedStatusCode: 200,
+			assertResponseBody: "Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution",
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/", srv.URL),
+			method: "GET",
+			headers: map[string]string{"Accept": "application/json"},
+			expectedStatusCode: 200,
+			assertResponseBody: "Swarm: Please request a valid ENS or swarm hash with the appropriate bzz scheme",
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/robots.txt", srv.URL),
+			method: "GET",
+			headers: map[string]string{"Accept": "text/html"},
+			expectedStatusCode: 200,
+			assertResponseBody: "User-agent: *\nDisallow: /",
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/nonexistent_path", srv.URL),
+			method: "GET",
+			headers: map[string]string{},
+			expectedStatusCode: 404,
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/bzz:asdf/", srv.URL),
+			method: "GET",
+			headers: map[string]string{},
+			expectedStatusCode: 404,
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/tbz2/", srv.URL),
+			method: "GET",
+			headers: map[string]string{},
+			expectedStatusCode: 404,
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/bzz-rack:/", srv.URL),
+			method: "GET",
+			headers: map[string]string{},
+			expectedStatusCode: 404,
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/bzz-ls", srv.URL),
+			method: "GET",
+			headers: map[string]string{},
+			expectedStatusCode: 404,
+			verbose: false,
+		},
+	} {
+		t.Run("GET "+testCase.uri, func(t *testing.T) {
+			res, body := httpDo(testCase.method, testCase.uri, nil, testCase.headers, testCase.verbose, t)
+			if res.StatusCode != testCase.expectedStatusCode {
+				t.Fatalf("expected status code %d but got %d", testCase.expectedStatusCode, res.StatusCode)
+			}
+			if testCase.assertResponseBody != "" && !strings.Contains(body, testCase.assertResponseBody) {
+				t.Fatalf("expected response to be: %s but got: %s", testCase.assertResponseBody, body)
+			}
+		})
+	}
+}
+
+func TestModify(t *testing.T) {
+	srv := testutil.NewTestSwarmServer(t, serverFunc)
+	defer srv.Close()
+
+	swarmClient := swarm.NewClient(srv.URL)
+	data := []byte("data")
+	file := &swarm.File{
+		ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
+		ManifestEntry: api.ManifestEntry{
+			Path: "",
+			ContentType: "text/plain",
+			Size: int64(len(data)),
+		},
+	}
+
+	hash, err := swarmClient.Upload(file, "", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, testCase := range []struct {
+		uri                   string
+		method                string
+		headers               map[string]string
+		requestBody           []byte
+		expectedStatusCode    int
+		assertResponseBody    string
+		assertResponseHeaders map[string]string
+		verbose               bool
+	}{
+		{
+			uri: fmt.Sprintf("%s/bzz:/%s", srv.URL, hash),
+			method: "DELETE",
+			headers: map[string]string{},
+			expectedStatusCode: 200,
+			assertResponseBody: "8b634aea26eec353ac0ecbec20c94f44d6f8d11f38d4578a4c207a84c74ef731",
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/bzz:/%s", srv.URL, hash),
+			method: "PUT",
+			headers: map[string]string{},
+			expectedStatusCode: 405,
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/bzz-raw:/%s", srv.URL, hash),
+			method: "PUT",
+			headers: map[string]string{},
+			expectedStatusCode: 405,
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/bzz:/%s", srv.URL, hash),
+			method: "PATCH",
+			headers: map[string]string{},
+			expectedStatusCode: 405,
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/bzz-raw:/", srv.URL),
+			method: "POST",
+			headers: map[string]string{},
+			requestBody: []byte("POSTdata"),
+			expectedStatusCode: 200,
+			assertResponseHeaders: map[string]string{"Content-Length": "64"},
+			verbose: false,
+		},
+		{
+			uri: fmt.Sprintf("%s/bzz-raw:/encrypt", srv.URL),
+			method: "POST",
+			headers: map[string]string{},
+			requestBody: []byte("POSTdata"),
+			expectedStatusCode: 200,
+			assertResponseHeaders: map[string]string{"Content-Length": "128"},
+			verbose: false,
+		},
+	} {
+		t.Run(testCase.method+" "+testCase.uri, func(t *testing.T) {
+			reqBody := bytes.NewReader(testCase.requestBody)
+			res, body := httpDo(testCase.method, testCase.uri, reqBody, testCase.headers, testCase.verbose, t)
+
+			if res.StatusCode != testCase.expectedStatusCode {
+				t.Fatalf("expected status code %d but got %d", testCase.expectedStatusCode, res.StatusCode)
+			}
+			if testCase.assertResponseBody != "" && !strings.Contains(body, testCase.assertResponseBody) {
+				t.Log(body)
+				t.Fatalf("expected response %s but got %s", testCase.assertResponseBody, body)
+			}
+			for key, value := range testCase.assertResponseHeaders {
+				if res.Header.Get(key) != value {
+					t.Logf("expected %s=%s in HTTP response header but got %s", key, value, res.Header.Get(key))
+				}
+			}
+		})
+	}
+}
+
+func TestMultiPartUpload(t *testing.T) {
+	// POST /bzz:/ Content-Type: multipart/form-data
+	verbose := false
+	// Setup Swarm
+	srv := testutil.NewTestSwarmServer(t, serverFunc)
+	defer srv.Close()
+
+	url := fmt.Sprintf("%s/bzz:/", srv.URL)
+
+	buf := new(bytes.Buffer)
+	form := multipart.NewWriter(buf)
+	form.WriteField("name", "John Doe")
+	file1, _ := form.CreateFormFile("cv", "cv.txt")
+	file1.Write([]byte("John Doe's Credentials"))
+	file2, _ := form.CreateFormFile("profile_picture", "profile.jpg")
+	file2.Write([]byte("imaginethisisjpegdata"))
+	form.Close()
+
+	headers := map[string]string{
+		"Content-Type": form.FormDataContentType(),
+		"Content-Length": strconv.Itoa(buf.Len()),
+	}
+	res, body := httpDo("POST", url, buf, headers, verbose, t)
+
+	if res.StatusCode != 200 {
+		t.Fatalf("expected POST multipart/form-data to return 200, but it returned %d", res.StatusCode)
+	}
+	if len(body) != 64 {
+		t.Fatalf("expected POST multipart/form-data to return a 64 char manifest but the answer was %d chars long", len(body))
+	}
+}
diff --git a/swarm/api/http/templates.go b/swarm/api/http/templates.go
index ffd816493074..986f5f8873d3 100644
--- a/swarm/api/http/templates.go
+++ b/swarm/api/http/templates.go
@@ -17,6 +17,8 @@
 package http
 
 import (
+	"encoding/hex"
+	"fmt"
 	"html/template"
 	"path"
@@ -28,187 +30,277 @@ type htmlListData struct {
 	List *api.ManifestList
 }
 
-var htmlListTemplate = template.Must(template.New("html-list").Funcs(template.FuncMap{"basename": path.Base}).Parse(`
-
-
+var TemplatesMap = make(map[string]*template.Template)
+var faviconBytes []byte
+
+func init() {
+	for _, v := range []struct {
+		templateName string
+		partial      string
+		funcs        template.FuncMap
+	}{
+		{
+			templateName: "error",
+			partial: errorResponse,
+		},
+		{
+			templateName: "bzz-list",
+			partial: bzzList,
+			funcs: template.FuncMap{
+				"basename": path.Base,
+				"leaflink": leafLink,
+			},
+		},
+		{
+			templateName: "landing-page",
+			partial: landing,
+		},
+	} {
+		TemplatesMap[v.templateName] = template.Must(template.New(v.templateName).Funcs(v.funcs).Parse(baseTemplate + css + v.partial + logo))
+	}
+
+	bytes, err := hex.DecodeString(favicon)
+	if err != nil {
+		panic(err)
+	}
+	faviconBytes = bytes
+}
+
+func leafLink(URI api.URI, manifestEntry api.ManifestEntry) string {
+	return fmt.Sprintf("/bzz:/%s/%s", URI.Addr, manifestEntry.Path)
+}
+
+const bzzList = `{{ define "content" }}
+

Swarm index of {{ .URI }}

+
+ + + + + + + + + + + {{ range .List.CommonPrefixes }} + + + + + + {{ end }} + {{ range .List.Entries }} + + + + + + {{ end }} +
PathTypeSize
+ {{ basename . }}/ + DIR-
+ {{ basename .Path }} + {{ .ContentType }}{{ .Size }}
+
+ + {{ end }}` + +const errorResponse = `{{ define "content" }} +
+ + +
+

{{.Msg}}

+
+ +
+
Error code: {{.Code}}
+
+ + +
+{{ end }}` + +const landing = `{{ define "content" }} + + + +
+ + + + + +
+ +{{ end }}` + +const baseTemplate = ` - - - - Swarm index of {{ .URI }} + + + + + - -

Swarm index of {{ .URI }}

-
- - - - - - - - - - - {{ range .List.CommonPrefixes }} - - - - - - {{ end }} - - {{ range .List.Entries }} - - - - - - {{ end }} -
PathTypeSize
{{ basename . }}/DIR-
{{ basename .Path }}{{ .ContentType }}{{ .Size }}
-
+ {{ template "content" . }} -`[1:])) - -var landingPageTemplate = template.Must(template.New("landingPage").Parse(` - - - - - - - - Swarm :: Welcome to Swarm - - - - -
-
- -
-
-

Welcome to Swarm

-
-
- - - - -

Enter the hash or ENS of a Swarm-hosted file below:

- - - -
-
-

- Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution
- Swarm -

-
- - -`[1:])) +` + +const css = `{{ define "css" }} +html { + font-size: 18px; + font-size: 1.13rem; + -webkit-text-size-adjust: 100%; + -ms-text-size-adjust: 100%; + font-family: Helvetica, Arial, sans-serif; +} + +body { + background: #f6f6f6; + color: #333; +} + +a, a:visited, a:active { + color: darkorange; +} + +a.normal-link, a.normal-link:active { color: #0000EE; } +a.normal-link:visited { color: #551A8B; } + +table { + border-collapse: separate; +} + +td { + padding: 3px 10px; +} + + +.container { + max-width: 600px; + margin: 40px auto 40px; + text-align: center; +} + +.separate-block { + margin: 40px 0; + word-wrap: break-word; +} + +.footer { + font-size: 12px; + font-size: 0.75rem; + text-align: center; +} + +.orange { + color: #ffa500; +} + +.top-space { + margin-top: 20px; + margin-bottom: 20px; +} + +/* SVG Logos, editable */ + +.searchbar { + padding: 20px 20px 0; +} + +.logo { + margin: 100px 80px 0; +} + +.logo a img { + max-width: 140px; +} + +/* Tablet < 600p*/ + +@media only screen and (max-width: 600px) {} + +/* Mobile phone < 360p*/ + +@media only screen and (max-width: 360px) { + h1 { + font-size: 20px; + font-size: 1.5rem; + } + h2 { + font-size: 0.88rem; + margin: 0; + } + .logo { + margin: 50px 40px 0; + } + .footer { + font-size: 0.63rem; + text-align: center; + } +} + +input[type=text] { + width: 100%; + box-sizing: border-box; + border: 2px solid #777; + border-radius: 2px; + font-size: 16px; + padding: 12px 20px 12px 20px; + transition: border 250ms ease-in-out; +} + +input[type=text]:focus { + border: 2px solid #ffce73; +} + +.button { + background-color: #ffa500; + margin: 20px 0; + border: none; + border-radius: 2px; + color: #222; + padding: 15px 32px; + text-align: center; + text-decoration: none; + display: inline-block; + font-size: 16px; +} +{{ end }}` + +const logo = `{{ define "logo" }} + +{{ end }}` + +const favicon = 
`000001000400101000000000200068040000460000002020000000002000a8100000ae0400003030000000002000a825000056150000404000000000200028420000fe3a000028000000100000002000000001002000000000004004000000000000000000000000000000000000ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017e7e7e0362626263545454c548484849ffffff01ffffff01ffffff01ffffff01646464375b5b5bbf4545457758585809ffffff01ffffff01ffffff0164646443626262cf626262ff535353ff454545ff454545b74949492b6868681d626262a5626262fd5c5c5cff464646ff454545dd47474755ffffff01ffffff013f3f3feb565656ff636363ff535353ff464646ff3f3f3fff373737ab393939894d4d4dff626262ff5c5c5cff464646ff424242ff3a3a3af7ffffff01ffffff01383838e9353535ff424242ff474747ff383838ff353535ff363636ab35353587363636ff3a3a3aff4a4a4aff3b3b3bff353535ff363636f5ffffff01ffffff01383838e9303030ff181818ff131313ff232323ff343434ff363636ab35353587343434ff202020ff101010ff1d1d1dff303030ff373737f5ffffff01ffffff01232323c50c0c0cff0d0d0dff131313ff171717ff171717ff2929298b2727276b0f0f0ffd0d0d0dff101010ff171717ff161616ff232323d9ffffff01ffffff014d4d4d030f0f0f650c0c0ce7131313ff161616d51d1d1d4b63636363464646691717173b0d0d0dc50f0f0fff161616ef171717752e2e2e07ffffff01ffffff01ffffff01ffffff011d1d1d0f1515155360606045626262cf636363ff464646ff454545d3484848491414144d24242417ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013c3c3c374f4f4fff636363ff636363ff464646ff464646ff3f3f3fff3c3c3c41ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013636363d353535ff3c3c3cff575757ff363636ff181818ff282828ff37373747ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013636363d363636ff303030ff181818ff292929ff131313ef17171771696969136565653bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01323232371e1e1eff0d0d0dff0c0c0cff363636ff363636a3ffffff0185858515606060ff4747476bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01111111450d0d0dd10c0c0cff1b1b1bff2a2a2a993e3e3e0b30303085292929ff37373787ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636030e0e0e671616166b45454505323232432e2e2ed9151515c31d1d1d2dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014e4e4e05ffffff01ffffff01ffffff01ffffff010000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff28000000200000004000000001002000000000008010000000000000000000000000000000000000ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017272721b646464a54646466f72727205ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0168686845575757b74f4f4f39ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017e7e7e0b6262627d616161f3636363ff424242ff444444d74f4f4f49ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff016c6c6c27636363b5616161ff555555ff434343ff464646a35858581dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff016666665d616161e3626262
ff636363ff636363ff444444ff464646ff434343ff454545b95252522bffffff01ffffff01ffffff01ffffff016c6c6c1363636393616161fb636363ff636363ff555555ff464646ff464646ff444444f5464646836666660bffffff01ffffff01ffffff01ffffff01ffffff016a6a6a3f626262c9616161ff636363ff636363ff636363ff636363ff444444ff464646ff464646ff464646ff434343fb48484897545454135b5b5b036868686f616161ef626262ff636363ff636363ff636363ff555555ff464646ff464646ff464646ff454545ff444444e54a4a4a5fffffff01ffffff01ffffff01ffffff013b3b3bd7505050ff646464ff636363ff636363ff636363ff636363ff444444ff464646ff464646ff464646ff454545ff3a3a3aff33333357313131113c3c3cff5a5a5aff646464ff636363ff636363ff636363ff555555ff464646ff464646ff464646ff464646ff424242ff383838f1ffffff01ffffff01ffffff01ffffff013a3a3ad5353535ff3a3a3aff575757ff646464ff626262ff636363ff444444ff464646ff464646ff3d3d3dff353535ff363636ff3636365535353511363636ff343434ff434343ff606060ff636363ff636363ff555555ff464646ff464646ff444444ff393939ff353535ff373737edffffff01ffffff01ffffff01ffffff013a3a3ad5363636ff363636ff343434ff3f3f3fff5d5d5dff646464ff444444ff404040ff363636ff353535ff363636ff363636ff3636365535353511363636ff363636ff363636ff343434ff4a4a4aff636363ff555555ff454545ff3c3c3cff353535ff363636ff363636ff373737edffffff01ffffff01ffffff01ffffff013a3a3ad5363636ff363636ff363636ff363636ff353535ff3f3f3fff363636ff353535ff363636ff363636ff363636ff363636ff3636365535353511363636ff363636ff363636ff363636ff353535ff383838ff3a3a3aff373737ff353535ff363636ff363636ff363636ff373737edffffff01ffffff01ffffff01ffffff013a3a3ad5363636ff363636ff363636ff323232ff181818ff0e0e0eff171717ff282828ff373737ff363636ff363636ff363636ff3636365535353511363636ff363636ff353535ff373737ff292929ff0f0f0fff111111ff1b1b1bff2f2f2fff373737ff363636ff363636ff373737edffffff01ffffff01ffffff01ffffff013a3a3ad5363636ff363636ff1e1e1eff0b0b0bff0d0d0dff0f0f0fff171717ff161616ff191919ff2c2c2cff373737ff363636ff3636365535353511363636ff373737ff2f2f2fff141414ff0b0b0bff0d0d0dff131313ff171717ff151515ff1f1f1fff333333ff363636ff373737edffffff01ffffff01ffffff01ffffff013b3b3bd5252525ff0d0d0dff0c0c0cff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff151515ff1c1c1cff313131ff3535355734343411333333ff1a1a1aff0b0b0bff0d0d0dff0d0d0dff0d0d0dff131313ff171717ff171717ff171717ff161616ff242424ff373737efffffff01ffffff01ffffff01ffffff012020205d0b0b0be50b0b0bff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff131313ff161616b73333331f3b3b3b05111111970a0a0afb0d0d0dff0d0d0dff0d0d0dff0d0d0dff131313ff171717ff171717ff171717ff161616ff141414f51c1c1c7fffffff01ffffff01ffffff01ffffff01ffffff014d4d4d0b1212127f0a0a0af50d0d0dff0d0d0dff0f0f0fff171717ff171717ff151515ff151515d522222249ffffff017373731b51515121ffffff011d1d1d2b101010b50a0a0aff0d0d0dff0d0d0dff131313ff171717ff171717ff131313ff181818a12e2e2e1dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012c2c2c1b0f0f0fa10a0a0afd0f0f0fff161616ff141414e91b1b1b69656565057878780b6363637b626262f3464646f7454545896969690fffffff011c1c1c470c0c0cd30b0b0bff131313ff141414ff151515c32a2a2a37ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff011d1d1d35111111bd1a1a1a8d2f2f2f11ffffff0166666659616161e1626262ff646464ff474747ff454545ff444444e9494949677b7b7b054040400517171769131313cd24242455ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0169696939626262c7616161ff636363ff636363ff646464ff474747ff464646ff464646ff444444ff454545d14e4e4e45ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01f
fffff01ffffff01ffffff01ffffff01ffffff01ffffff01424242615e5e5eff636363ff636363ff636363ff636363ff646464ff474747ff464646ff464646ff464646ff464646ff434343ff3f3f3f77ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679343434ff494949ff636363ff636363ff636363ff646464ff474747ff464646ff464646ff474747ff3d3d3dff353535ff3a3a3a8dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679363636ff353535ff363636ff505050ff646464ff636363ff474747ff484848ff2f2f2fff1c1c1cff323232ff363636ff3a3a3a8dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679363636ff363636ff363636ff353535ff3a3a3aff5a5a5aff393939ff0f0f0fff040404ff111111ff151515ff232323ff3535358fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679363636ff363636ff363636ff363636ff323232ff171717ff2a2a2aff0c0c0cff030303ff111111ff141414fb171717992e2e2e17a3a3a305ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679363636ff363636ff363636ff1f1f1fff0b0b0bff0d0d0dff363636ff383838ff242424ff121212bf2a2a2a2dffffff01ffffff018484842b636363bf6d6d6d2fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679373737ff252525ff0d0d0dff0c0c0cff0d0d0dff0d0d0dff373737ff363636ff353535ff39393949ffffff01ffffff01ffffff0186868629646464ff656565fb6464649b55555505ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012e2e2e650e0e0eff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0c0c0cff353535ff363636ff353535ff37373749ffffff01ffffff01ffffff0185858529656565ff525252ff353535ff4b4b4b0fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff011c1c1c430d0d0dcf0b0b0bff0d0d0dff0d0d0dff0d0d0dff171717ff282828ff363636ff37373749ffffff01ffffff01ffffff0144444459363636ff353535ff353535ff4e4e4e0fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0162626203161616630b0b0be70c0c0cff0d0d0dff171717ff161616ff171717ed3737372fffffff013e3e3e2b303030b72a2a2aff151515ff262626ff363636ff4b4b4b0fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013636360d101010850a0a0af7141414f91717178f45454511ffffff014c4c4c252c2c2cdb303030ff2d2d2dff151515ff131313ff1b1b1bad5a5a5a07ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012b2b2b2121212127ffffff01ffffff01ffffff01ffffff0161616109313131752b2b2bf1131313cd26262641ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014e4e4e1359595903ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffff
ff01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000028000000300000006000000001002000000000008025000000000000000000000000000000000000ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0173737357545454997c7c7c11ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0176767663515151916c6c6c0dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017676762d636363bb636363ff4d4d4dff434343eb4f4f4f6d7f7f7f05ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0176767635616161c3626262ff494949ff424242e94f4f4f6392929203ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017e7e7e19626262955f5f5ffd626262ff666666ff4f4f4fff464646ff424242ff434343d75a5a5a49ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017777771d6464649f5f5f5fff636363ff656565ff4b4b4bff464646ff424242ff444444d158585841ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff018585850966666677606060ef626262ff636363ff636363ff666666ff4f4f4fff464646ff464646ff464646ff414141ff464646b75d5d5d2dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff018989890d6868687f5f5f5ff5626262ff636363ff636363ff656565ff4b4b4bff464646ff464646ff464646ff404040ff484848b160606027ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff016a6a6a55626262df606060ff636363ff636363ff636363ff636363ff666666ff4f4f4fff464646ff464646ff464646ff464646ff454545ff424242fd484848956a6a6a17ffffff01ffffff01ffffff01ffffff01ffffff016969695f606060e3606060ff636363ff636363ff636363ff636363ff656565ff4b4b4bff464646ff464646ff464646ff464646ff454545ff414141f94a4a4a8d65656513ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff016e6e6e3b656565c15f5f5fff636363ff636363ff636363ff636363ff636363ff636363ff666666ff4f4f4fff464646ff464646ff464646ff464646f
f464646ff464646ff444444ff424242ed52525277ffffff01ffffff016c6c6c37676767c95f5f5fff636363ff636363ff636363ff636363ff636363ff636363ff656565ff4b4b4bff464646ff464646ff464646ff464646ff464646ff464646ff434343ff444444e94d4d4d6dffffff01ffffff01ffffff01ffffff01ffffff01ffffff013c3c3cc5454545ff646464ff646464ff636363ff636363ff636363ff636363ff636363ff666666ff4f4f4fff464646ff464646ff464646ff464646ff464646ff464646ff474747ff424242ff333333fb34343409ffffff0131313199494949ff656565ff646464ff636363ff636363ff636363ff636363ff636363ff656565ff4b4b4bff464646ff464646ff464646ff464646ff464646ff464646ff474747ff414141ff373737ebffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf333333ff343434ff4f4f4fff666666ff636363ff636363ff636363ff636363ff666666ff4f4f4fff464646ff464646ff464646ff464646ff474747ff444444ff383838ff343434ff363636f737373707ffffff0135353597343434ff343434ff525252ff666666ff636363ff636363ff636363ff636363ff656565ff4b4b4bff464646ff464646ff464646ff464646ff474747ff444444ff383838ff343434ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff333333ff383838ff585858ff676767ff636363ff636363ff666666ff4f4f4fff464646ff464646ff474747ff464646ff3b3b3bff343434ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff333333ff383838ff5a5a5aff666666ff636363ff636363ff656565ff4b4b4bff464646ff464646ff474747ff454545ff3a3a3aff343434ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff363636ff363636ff323232ff3d3d3dff5d5d5dff666666ff666666ff4f4f4fff464646ff474747ff3e3e3eff353535ff353535ff363636ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff363636ff363636ff313131ff3f3f3fff5f5f5fff666666ff656565ff4b4b4bff464646ff474747ff3d3d3dff353535ff353535ff363636ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff363636ff363636ff363636ff353535ff323232ff444444ff676767ff525252ff404040ff363636ff353535ff363636ff363636ff363636ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff363636ff363636ff363636ff353535ff323232ff464646ff676767ff4e4e4eff404040ff363636ff353535ff363636ff363636ff363636ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff363636ff363636ff363636ff363636ff353535ff383838ff2d2d2dff2b2b2bff373737ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff363636ff363636ff363636ff363636ff363636ff383838ff2c2c2cff2a2a2aff373737ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff363636ff363636ff353535ff383838ff343434ff171717ff090909ff151515ff171717ff2d2d2dff383838ff363636ff363636ff363636ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff363636ff363636ff353535ff383838ff333333ff151515ff090909ff151515ff181818ff2f2f2fff383838ff363636ff363636ff363636ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff363636ff373737ff373737ff1f1f1fff090909ff0c0c0cff0c0c0cff171717ff171717ff141414ff1b1b1bff323232ff383838ff363636ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff363636ff373737ff373737ff1d1d1dff0a0a0aff0c0c0cff0c0c0cff171717ff171717ff141414ff1c1c1cff333333ff383838ff353535ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff393939ff272727ff0c0c0cff0b0b0bff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff161616ff141414ff202020ff353535ff373737ff363636ff363636f737373707ffffff0135353597363636ff363636ff383838ff252525ff0b
0b0bff0b0b0bff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff161616ff141414ff222222ff363636ff373737ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf383838ff2d2d2dff101010ff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff161616ff141414ff262626ff373737ff373737f737373707ffffff0136363697393939ff2b2b2bff0f0f0fff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff161616ff151515ff272727ff383838ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013a3a3abd131313ff090909ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff171717ff171717ff151515ff171717ff262626fb38383807ffffff012a2a2a97121212ff090909ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff171717ff171717ff151515ff161616ff2a2a2ae7ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015f5f5f0b1616167b090909ef0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff171717ff171717ff0f0f0fff181818b74040402dffffff01ffffff014646461118181883080808f30b0b0bff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff171717ff161616ff101010ff181818b141414127ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014d4d4d171212129b090909fd0c0c0cff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff111111ff141414d335353547ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013838381d131313a5060606ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff111111ff181818cd2e2e2e3dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01333333310f0f0fbb070707ff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff141414ff121212e72424246d86868603ffffff01ffffff017373732b656565b9464646c95e5e5e3bffffff01ffffff01ffffff01323232370e0e0ec3080808ff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff121212ff161616e525252563ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012525254d0e0e0ed9090909ff0c0c0cff171717ff151515ff121212f91d1d1d894d4d4d13ffffff01ffffff0178787815656565935f5f5ffb646464ff484848ff404040ff454545a96a6a6a1fffffff01ffffff01ffffff011b1b1b570e0e0edf080808ff0d0d0dff171717ff151515ff0f0f0ff3212121815656560dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01636363071a1a1a710a0a0aed0f0f0fff1b1b1bad2f2f2f23ffffff01ffffff018d8d8d0566666675616161eb616161ff636363ff646464ff484848ff464646ff454545ff424242f54c4c4c856262620fffffff01ffffff014040400b21212179080808f10f0f0fff1b1b1ba15757571dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014141411740404037ffffff01ffffff01ffffff016a6a6a4d616161db606060ff636363ff636363ff636363ff646464ff484848ff464646ff464646ff464646ff434343ff434343e751515167ffffff01ffffff01ffffff014646461d30303033ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0176767631616161c35f5f5fff636363ff636363ff636363ff636363ff636363ff646464ff484848ff464646ff464646ff464646ff464646ff464646ff424242ff454545d158585841ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01fffff
f01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015252527f636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff646464ff484848ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff434343ff454545a1ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01313131b53b3b3bff5b5b5bff676767ff636363ff636363ff636363ff636363ff636363ff646464ff484848ff464646ff464646ff464646ff464646ff464646ff474747ff444444ff393939ff383838d3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff323232ff404040ff616161ff656565ff626262ff636363ff636363ff646464ff484848ff464646ff464646ff454545ff494949ff474747ff3b3b3bff343434ff353535ff3a3a3ad3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff353535ff323232ff484848ff656565ff646464ff636363ff646464ff484848ff464646ff474747ff494949ff242424ff282828ff383838ff363636ff363636ff3a3a3ad3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff363636ff363636ff343434ff343434ff515151ff666666ff656565ff484848ff4b4b4bff323232ff070707ff040404ff151515ff181818ff2f2f2fff383838ff3a3a3ad3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff363636ff363636ff363636ff363636ff333333ff383838ff5f5f5fff3c3c3cff0f0f0fff020202ff050505ff050505ff171717ff171717ff141414ff1c1c1cff323232d7ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff363636ff363636ff363636ff353535ff383838ff343434ff161616ff2a2a2aff0c0c0cff020202ff050505ff050505ff171717ff171717ff101010ff161616bf2e2e2e35ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff363636ff363636ff373737ff383838ff1f1f1fff0a0a0aff0c0c0cff373737ff3a3a3aff262626ff060606ff040404ff121212ff151515dd30303051ffffff01ffffff01ffffff018787872d6b6b6b47ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff363636ff393939ff272727ff0d0d0dff0b0b0bff0d0d0dff0d0d0dff373737ff363636ff373737ff383838ff1c1c1cf92020207568686807ffffff01ffffff01ffffff01ffffff018686863d5f5f5fff676767af77777721ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01
ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff393939ff2e2e2eff101010ff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff373737ff363636ff363636ff353535ff373737ebffffff01ffffff01ffffff01ffffff01ffffff01ffffff018686863d626262ff666666ff646464f76969698d9494940fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01383838b5333333ff161616ff090909ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff373737ff363636ff363636ff363636ff353535ebffffff01ffffff01ffffff01ffffff01ffffff01ffffff018686863d626262ff676767ff6b6b6bff555555ff3a3a3a93ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0125252589030303ff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff333333ff383838ff353535ff363636ff353535ebffffff01ffffff01ffffff01ffffff01ffffff01ffffff018585853d666666ff5f5f5fff3c3c3cff313131ff3a3a3a93ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012d2d2d3f0e0e0ecb080808ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff141414ff222222ff363636ff373737ff353535ebffffff01ffffff01ffffff01ffffff01ffffff01ffffff0177777741414141ff313131ff363636ff353535ff3a3a3a93ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff011e1e1e5f0a0a0ae50a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff171717ff161616ff151515ff282828ff353535f3ffffff01ffffff01ffffff01ffffff016e6e6e0b37373781242424f1191919ff333333ff383838ff343434ff3a3a3a93ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015a5a5a0d1919197f0a0a0af30b0b0bff0d0d0dff0d0d0dff171717ff171717ff161616ff0f0f0ffb24242489ffffff01ffffff01ffffff013e3e3e5d2d2d2de52e2e2eff2b2b2bff151515ff141414ff212121ff363636ff3b3b3b95ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013636361b111111a3080808ff0c0c0cff181818ff0f0f0fff171717b545454525ffffff01ffffff017f7f7f05363636c7282828ff313131ff313131ff2b2b2bff151515ff171717ff161616ff0c0c0cfb3434346bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01303030350f0f0fc7121212d337373741ffffff01ffffff01ffffff01ffffff01ffffff016b6b6b0b3a3a3a7d2c2c2cf12f2f2fff2b2b2bff151515ff101010ff171717bb4646462dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01515151193535359b242424ff131313d72828284bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01fff
fff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014e4e4e2b59595905ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff28000000400000008000000001002000000000000042000000000000000000000000000000000000ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff
01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0176767635666666914e4e4e457c7c7c09ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff018080801569696989545454696c6c6c0bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff018484840d70707061616161d5606060fb3d3d3ddf4e4e4e9172727213ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017070704d626262b35f5f5ffb464646f1454545a16a6a6a33ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017676760f67676753646464cf5e5e5eff656565ff626262ff414141ff404040ff444444e54b4b4b7b69696919ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01979797036c6c6c45676767a95d5d5dff616161ff626262ff484848ff424242ff3e3e3efd4e4e4e8958585831ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017e7e7e2b616161a75f5f5fef616161ff636363ff656565ff626262ff424242ff464646ff444444ff414141fd434343b961616153ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017777771969696981606060e7606060ff636363ff636363ff626262ff484848ff464646ff454545ff424242fd414141d95656566569696911ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01858585056e6e6e29656565995f5f5ff1616161ff636363ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff444444ff3f3f3fff484848af5353534b86868607ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01797979216a6a6a6f616161ed5e5e5eff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff3e3e3eff474747d75151515762626213ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01838383036f6f6f755f5f5fd3606060ff626262ff636363ff636363ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff464646ff454545ff434343ff404040e94e4e4e8d5f5f5f1bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff018f8f8f056b6b6b45616161c95f5f5ff7616161ff636363ff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff444444ff424242f1434343b16666662dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017070700f6969695f626262d35e5e5eff626262ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff626262ff4
24242ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff404040ff444444f14d4d4d776a6a6a23ffffff01ffffff01ffffff01ffffff017b7b7b096c6c6c39636363c15f5f5ffb626262ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff434343ff414141f54a4a4aa35b5b5b2d70707007ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0171717143676767a7616161f3616161ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff444444ff414141f7474747cd54545447ffffff01ffffff015b5b5b096b6b6b99646464e1606060ff626262ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff444444ff424242ff414141d552525277ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040b33b3b3bff5c5c5cff656565ff646464ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff474747ff454545ff3a3a3aff313131ad34343407ffffff012e2e2e25383838ff535353ff656565ff656565ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff474747ff464646ff3b3b3bff3a3a3ae9ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9313131ff363636ff484848ff636363ff676767ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff404040ff363636ff343434ff353535a537373705ffffff0135353521333333ff333333ff434343ff5c5c5cff686868ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff484848ff414141ff393939ff313131ff3c3c3cdbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff323232ff353535ff4b4b4bff636363ff656565ff636363ff626262ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff464646ff474747ff464646ff414141ff363636ff343434ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff333333ff313131ff484848ff5e5e5eff666666ff646464ff626262ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff474747ff424242ff3a3a3aff343434ff353535ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff343434ff333333ff3d3d3dff555555ff686868ff656565ff626262ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff484848ff444444ff393939ff353535ff353535ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff353535ff323232ff363636ff515151ff646464ff656565ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff484848ff454545ff3d3d3dff353535ff343434ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff343434ff303030ff3f3f3fff575757ff666666ff656565ff646464ff626262ff424242ff464646ff474747ff454545ff3a3a3aff343434ff353535ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff363636ff303030ff373737ff535353ff636363ff656565ff636363ff626262ff484848ff464646ff474747ff454545ff3e3e3eff353535ff343434ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff363636ff363636ff333333ff333333ff4848
48ff606060ff696969ff626262ff434343ff474747ff3e3e3eff363636ff353535ff353535ff363636ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff363636ff353535ff343434ff333333ff3e3e3eff5d5d5dff686868ff626262ff484848ff474747ff424242ff373737ff353535ff353535ff363636ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff323232ff323232ff505050ff616161ff3d3d3dff373737ff343434ff353535ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff343434ff313131ff434343ff606060ff464646ff383838ff343434ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff3a3a3aff2b2b2bff1e1e1eff2d2d2dff383838ff373737ff353535ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff393939ff323232ff1c1c1cff262626ff373737ff383838ff353535ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff363636ff353535ff373737ff383838ff303030ff191919ff080808ff101010ff141414ff1a1a1aff303030ff383838ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff363636ff353535ff363636ff383838ff363636ff1d1d1dff0b0b0bff0c0c0cff141414ff181818ff292929ff373737ff373737ff363636ff363636ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff353535ff393939ff363636ff222222ff0c0c0cff0a0a0aff0c0c0cff121212ff171717ff151515ff161616ff212121ff353535ff393939ff363636ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff353535ff383838ff3a3a3aff262626ff121212ff0a0a0aff0c0c0cff0f0f0fff171717ff151515ff151515ff1e1e1eff2f2f2fff3a3a3aff363636ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff383838ff363636ff262626ff0d0d0dff090909ff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff141414ff151515ff232323ff353535ff383838ff363636ff353535ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff353535ff383838ff383838ff292929ff131313ff080808ff0c0c0cff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff151515ff131313ff202020ff313131ff383838ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff353535ff363636ff3a3a3aff2e2e2eff131313ff0a0a0aff0b0b0bff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff161616ff141414ff1a1a1aff2a2a2aff393939ff373737ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff3a3a3aff313131ff1c1c1cff0a0a0aff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff161616ff151515ff161616ff282828ff363636ff383838ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9353535ff383838ff313131ff151515ff080808ff0b0b0bf
f0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff171717ff161616ff131313ff1b1b1bff2d2d2dff373737ff373737ff363636a537373705ffffff0134343421363636ff383838ff333333ff1e1e1eff090909ff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff171717ff171717ff131313ff171717ff2a2a2aff363636ff353535ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444af353535ff1e1e1eff0d0d0dff0a0a0aff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff151515ff151515ff222222ff333333ff353535ad30303007ffffff0134343423373737ff282828ff0d0d0dff0a0a0aff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff151515ff141414ff1b1b1bff2e2e2eff3e3e3ee1ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013e3e3e6f0f0f0fd5040404ff0b0b0bff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff101010ff0e0e0ee72f2f2f7347474703ffffff013b3b3b13141414cd050505f70a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff121212ff0c0c0cf12a2a2aa5ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015f5f5f052020202b1a1a1aa1080808f1070707ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff141414ff0c0c0cff212121af2a2a2a496d6d6d07ffffff01ffffff01ffffff01333333231d1d1d730b0b0beb060606ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff151515ff0e0e0eff181818d72626265546464615ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014d4d4d29121212af080808ef0a0a0aff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff171717ff141414ff121212f9141414b93b3b3b4fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0138383819151515890a0a0ae5080808ff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff171717ff161616ff101010fb151515d72c2c2c614444440dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0133333311262626510f0f0fd7050505ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff101010ff141414e7242424733a3a3a19ffffff01ffffff01ffffff01878787097272725f4d4d4d736a6a6a11ffffff01ffffff01ffffff016060600524242445191919ad040404ff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff111111ff0e0e0efd242424873232322dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015c5c5c0d2525255f090909d7080808fb0b0b0bff0d0d0dff0c0c0cff121212ff171717ff171717ff161616ff121212ff121212df2121218965656511ffffff01ffffff01ffffff018080800d6767674b646464d1606060ff454545ff464646df4f4f4f6165656517ffffff01ffffff01ffffff01ffffff012d2d2d4b101010b5060606fb0a0a0aff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff161616ff131313ff101010ef2020209d4242422dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ff
ffff01ffffff01ffffff01ffffff01ffffff012c2c2c2d1f1f1f83080808fb080808ff0d0d0dff121212ff171717ff141414ff0f0f0ff91e1e1eb12c2c2c354d4d4d09ffffff01ffffff01ffffff0178787825646464a75f5f5feb616161ff656565ff4a4a4aff414141ff424242f3414141bd69696937ffffff01ffffff01ffffff01ffffff0142424219171717710d0d0de3060606ff0c0c0cff0f0f0fff171717ff151515ff0d0d0dff171717c3292929575656560dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013737372d1212129d080808ef0d0d0dff121212f5191919bf2e2e2e3d70707003ffffff01ffffff018c8c8c037676762564646497606060ed606060ff636363ff636363ff656565ff4a4a4aff444444ff464646ff444444ff404040f74a4a4aad5555553162626207ffffff01ffffff01ffffff014040401125252589090909dd0a0a0aff121212ff141414c738383869ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015b5b5b0b1f1f1f591d1d1daf292929673f3f3f19ffffff01ffffff01ffffff01ffffff016d6d6d715f5f5fcd606060ff626262ff636363ff636363ff636363ff656565ff4a4a4aff444444ff464646ff464646ff454545ff434343ff414141db4f4f4f857b7b7b11ffffff01ffffff01ffffff0153535307222222331d1d1da91b1b1b8d4141412365656503ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017c7c7c0f6868685d636363cb5e5e5eff626262ff636363ff636363ff636363ff636363ff636363ff656565ff4a4a4aff444444ff464646ff464646ff464646ff464646ff464646ff404040ff454545e14c4c4c6b69696917ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0177777733626262a3606060f3616161ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff4a4a4aff444444ff464646ff464646ff464646ff464646ff464646ff464646ff444444ff424242f9454545b55d5d5d49ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014b4b4b0f5e5e5e85626262ff626262ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff4a4a4aff444444ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff444444ff414141ff454545a16464641dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0132323225333333cf4e4e4eff646464ff666666ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff4a4a4aff444444ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff474747ff404040ff303030e35757573bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01fffff
f01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd313131ff363636ff515151ff636363ff656565ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff4a4a4aff444444ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff414141ff373737ff343434ff323232e159595939ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd363636ff343434ff333333ff3c3c3cff5b5b5bff686868ff636363ff626262ff636363ff636363ff636363ff656565ff4a4a4aff444444ff464646ff464646ff454545ff464646ff4c4c4cff454545ff393939ff353535ff353535ff353535ff323232e159595939ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd363636ff363636ff363636ff353535ff313131ff3f3f3fff5d5d5dff666666ff646464ff626262ff636363ff656565ff4a4a4aff444444ff454545ff474747ff4a4a4aff404040ff212121ff2f2f2fff373737ff373737ff353535ff363636ff323232e159595939ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd363636ff363636ff363636ff363636ff353535ff333333ff363636ff484848ff646464ff676767ff626262ff656565ff4a4a4aff444444ff4b4b4bff4a4a4aff262626ff0b0b0bff090909ff171717ff252525ff353535ff393939ff363636ff323232e159595939ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd363636ff363636ff363636ff363636ff363636ff363636ff363636ff323232ff363636ff4c4c4cff646464ff676767ff4d4d4dff484848ff2c2c2cff0b0b0bff020202ff040404ff0b0b0bff171717ff141414ff161616ff282828ff353535ff343434e359595939ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff343434ff323232ff3f3f3fff5f5f5fff3a3a3aff161616ff030303ff030303ff050505ff040404ff0b0b0bff171717ff171717ff161616ff151515ff1a1a1aff242424e55555553bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd363636ff363636ff363636ff363636ff363636ff363636ff363636ff353535ff363636ff383838ff2e2e2eff191919ff262626ff111111ff030303ff030303ff050505ff040404ff0b0b0bff171717ff171717ff151515ff111111f9121212cd272727557d7d7d09ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01
ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd363636ff363636ff363636ff363636ff363636ff363636ff363636ff383838ff373737ff242424ff0b0b0bff0a0a0aff393939ff393939ff222222ff080808ff020202ff030303ff0b0b0bff181818ff0f0f0fff151515f32424247935353525ffffff01ffffff01ffffff01a3a3a30fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd363636ff363636ff363636ff363636ff363636ff383838ff373737ff272727ff0c0c0cff090909ff0c0c0cff0e0e0eff373737ff363636ff3a3a3aff393939ff1e1e1eff080808ff080808ff0f0f0feb232323914040401dffffff01ffffff01ffffff01ffffff01ffffff018282825d626262c36d6d6d4d8d8d8d09ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd363636ff363636ff353535ff363636ff3a3a3aff2f2f2fff131313ff0b0b0bff0b0b0bff0d0d0dff0c0c0cff0e0e0eff373737ff363636ff353535ff363636ff393939ff303030ff1c1c1cc92626264d68686807ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01868686515e5e5eff646464e9696969957878781fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723363636cd363636ff373737ff383838ff313131ff161616ff090909ff0b0b0bff0d0d0dff0d0d0dff0d0d0dff0c0c0cff0e0e0eff373737ff363636ff363636ff363636ff353535ff353535ff3c3c3c8fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0186868651616161ff676767ff646464ff656565f16a6a6a7d7f7f7f25ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0137373723353535cd393939ff373737ff1f1f1fff0d0d0dff0a0a0aff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff0e0e0eff373737ff363636ff363636ff363636ff363636ff353535ff37373791ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0186868651616161ff676767ff666666ff676767ff686868f9555555cd55555511ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0134343425323232cf212121ff0e0e0eff090909ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff0e0e0eff383838ff363636ff363636ff363636ff363636ff353535ff37373791ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0186868651616161ff686868ff696969ff5f5f5fff3d3d3dff303030ff4848481dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01474747132323238f020202ff080808ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff0c0c0cff2e2e2eff393939ff363636ff353535ff363636ff353535ff37373791ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0185858551666666ff676767ff494949ff353535ff323232ff353535ff4e4e4e1bffffff01ffffff01ffffff01ffffff01ffffff01fff
fff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0130303045101010af080808f70a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff0d0d0dff131313ff1c1c1cff303030ff373737ff363636ff353535ff37373791ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0181818151494949ff363636ff313131ff363636ff353535ff363636ff4e4e4e1bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0141414113191919690f0f0fdb060606ff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff0d0d0dff171717ff151515ff161616ff222222ff363636ff383838ff37373791ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014d4d4d53272727c1242424ff373737ff373737ff353535ff353535ff363636ff4e4e4e1bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01626262091c1c1c830b0b0bd7090909ff0c0c0cff0d0d0dff0d0d0dff0c0c0cff0d0d0dff171717ff171717ff171717ff141414ff151515ff202020ff35353595ffffff01ffffff01ffffff01ffffff017474740540404049343434af2a2a2aff262626ff101010ff191919ff2e2e2eff373737ff363636ff363636ff4e4e4e1bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015a5a5a073636362d141414a7080808f5080808ff0d0d0dff0c0c0cff0d0d0dff171717ff171717ff171717ff151515ff0e0e0efb1b1b1bbb3d3d3d29ffffff01ffffff01ffffff0151515119393939892a2a2ae92d2d2dff323232ff282828ff141414ff151515ff151515ff1f1f1fff343434ff393939ff4949491dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013636362f111111b5070707f30a0a0aff0d0d0dff171717ff141414ff111111f5111111c74343433d70707005ffffff01ffffff017c7c7c034e4e4e632a2a2af7292929ff323232ff313131ff323232ff282828ff141414ff171717ff171717ff151515ff0e0e0efd222222e153535315ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012d2d2d151f1f1f590e0e0edb040404ff0f0f0fff171717e7262626673f3f3f1dffffff01ffffff01ffffff01ffffff01ffffff01444444293535358b2d2d2deb2b2b2bff313131ff323232ff282828ff141414ff171717ff121212ff0d0d0dff2222229d2626263dbebebe03ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01505050112626266f1d1d1d7f36363617ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01616161213333339d2c2c2ce92f2f2fff282828ff111111ff111111f7191919ab3c3c3c41ffffff01ffffff01ffffff01ffffff01ffffff
01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015151510b3b3b3b43383838c51f1f1fff141414d71e1e1e654f4f4f13ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015858580b4d4d4d4159595909ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000` diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index 28597636eed6..a1329a800fe2 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -18,6 +18,7 @@ package api import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -45,13 +46,14 @@ type Manifest struct { // ManifestEntry represents an entry in a swarm manifest type ManifestEntry struct { - Hash string `json:"hash,omitempty"` - Path string `json:"path,omitempty"` - ContentType string `json:"contentType,omitempty"` - Mode int64 `json:"mode,omitempty"` - Size int64 `json:"size,omitempty"` - ModTime time.Time `json:"mod_time,omitempty"` - Status int `json:"status,omitempty"` + Hash string `json:"hash,omitempty"` + Path string `json:"path,omitempty"` + ContentType string `json:"contentType,omitempty"` + Mode int64 `json:"mode,omitempty"` + Size int64 `json:"size,omitempty"` + ModTime time.Time `json:"mod_time,omitempty"` + Status int `json:"status,omitempty"` + Access *AccessEntry `json:"access,omitempty"` } // ManifestList represents the result of listing files in a manifest @@ -61,20 +63,20 @@ type ManifestList struct { } // NewManifest creates and stores a new, empty manifest -func (a *API) NewManifest(toEncrypt bool) (storage.Address, error) { +func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, error) { var manifest Manifest data, err := json.Marshal(&manifest) if err != nil { return nil, err } - key, wait, err := a.Store(bytes.NewReader(data), int64(len(data)), toEncrypt) - wait() + key, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), toEncrypt) + wait(ctx) return key, err } // Manifest hack for supporting Mutable Resource Updates from the bzz: scheme // see swarm/api/api.go:API.Get() for more information -func (a *API) NewResourceManifest(resourceAddr string) (storage.Address, error) { +func (a *API) NewResourceManifest(ctx context.Context, resourceAddr string) (storage.Address, error) { var manifest Manifest entry := ManifestEntry{ Hash: resourceAddr, @@ -85,7 +87,7 @@ func (a *API) NewResourceManifest(resourceAddr string) (storage.Address, error) if err != nil { return nil, err } - key, _, err := a.Store(bytes.NewReader(data), int64(len(data)), false) + key, _, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), false) return key, err } @@ -96,8 +98,8 @@ type ManifestWriter struct { quitC chan bool } -func (a *API) NewManifestWriter(addr storage.Address, quitC chan bool) (*ManifestWriter, error) { - trie, err := loadManifest(a.fileStore, addr, quitC) +func (a *API) NewManifestWriter(ctx context.Context, addr storage.Address, quitC chan bool) (*ManifestWriter, error) { + trie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt) if err != nil { return nil, fmt.Errorf("error loading manifest %s: %s", addr, err) } @@ -105,14 +107,18 @@ func (a *API) NewManifestWriter(addr storage.Address, quitC chan bool) (*Manifes } // AddEntry stores the given data and adds the resulting key to the manifest -func (m *ManifestWriter) AddEntry(data io.Reader, e *ManifestEntry) (storage.Address, error) { - - key, _, err := m.api.Store(data, e.Size, m.trie.encrypted) - if err != nil { - return nil, err - } +func (m *ManifestWriter) AddEntry(ctx context.Context, data io.Reader, e *ManifestEntry) (key storage.Address, err error) { entry := newManifestTrieEntry(e, nil) - entry.Hash = key.Hex() + if data != nil { + key, _, err = m.api.Store(ctx, data, e.Size, 
m.trie.encrypted) + if err != nil { + return nil, err + } + entry.Hash = key.Hex() + } + if entry.Hash == "" { + return key, errors.New("missing entry hash") + } m.trie.addEntry(entry, m.quitC) return key, nil } @@ -136,8 +142,8 @@ type ManifestWalker struct { quitC chan bool } -func (a *API) NewManifestWalker(addr storage.Address, quitC chan bool) (*ManifestWalker, error) { - trie, err := loadManifest(a.fileStore, addr, quitC) +func (a *API) NewManifestWalker(ctx context.Context, addr storage.Address, decrypt DecryptFunc, quitC chan bool) (*ManifestWalker, error) { + trie, err := loadManifest(ctx, a.fileStore, addr, quitC, decrypt) if err != nil { return nil, fmt.Errorf("error loading manifest %s: %s", addr, err) } @@ -159,7 +165,7 @@ func (m *ManifestWalker) Walk(walkFn WalkFn) error { } func (m *ManifestWalker) walk(trie *manifestTrie, prefix string, walkFn WalkFn) error { - for _, entry := range trie.entries { + for _, entry := range &trie.entries { if entry == nil { continue } @@ -189,6 +195,7 @@ type manifestTrie struct { entries [257]*manifestTrieEntry // indexed by first character of basePath, entries[256] is the empty basePath entry ref storage.Address // if ref != nil, it is stored encrypted bool + decrypt DecryptFunc } func newManifestTrieEntry(entry *ManifestEntry, subtrie *manifestTrie) *manifestTrieEntry { @@ -204,18 +211,18 @@ type manifestTrieEntry struct { subtrie *manifestTrie } -func loadManifest(fileStore *storage.FileStore, hash storage.Address, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand +func loadManifest(ctx context.Context, fileStore *storage.FileStore, hash storage.Address, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand log.Trace("manifest lookup", "key", hash) // retrieve manifest via FileStore - manifestReader, isEncrypted := fileStore.Retrieve(hash) + manifestReader, isEncrypted := fileStore.Retrieve(ctx, hash) log.Trace("reader retrieved", "key", hash) - return readManifest(manifestReader, hash, fileStore, isEncrypted, quitC) + return readManifest(manifestReader, hash, fileStore, isEncrypted, quitC, decrypt) } -func readManifest(manifestReader storage.LazySectionReader, hash storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand +func readManifest(mr storage.LazySectionReader, hash storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand // TODO check size for oversized manifests - size, err := manifestReader.Size(quitC) + size, err := mr.Size(mr.Context(), quitC) if err != nil { // size == 0 // can't determine size means we don't have the root chunk log.Trace("manifest not found", "key", hash) @@ -228,7 +235,7 @@ func readManifest(manifestReader storage.LazySectionReader, hash storage.Address return } manifestData := make([]byte, size) - read, err := manifestReader.Read(manifestData) + read, err := mr.Read(manifestData) if int64(read) < size { log.Trace("manifest not found", "key", hash) if err == nil { @@ -253,26 +260,41 @@ func readManifest(manifestReader storage.LazySectionReader, hash storage.Address trie = &manifestTrie{ fileStore: fileStore, encrypted: isEncrypted, + decrypt: decrypt, } for _, entry := range man.Entries { - trie.addEntry(entry, quitC) + err = trie.addEntry(entry, 
quitC) + if err != nil { + return + } } return } -func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) { +func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) error { mt.ref = nil // trie modified, hash needs to be re-calculated on demand + if entry.ManifestEntry.Access != nil { + if mt.decrypt == nil { + return errors.New("dont have decryptor") + } + + err := mt.decrypt(&entry.ManifestEntry) + if err != nil { + return err + } + } + if len(entry.Path) == 0 { mt.entries[256] = entry - return + return nil } b := entry.Path[0] oldentry := mt.entries[b] if (oldentry == nil) || (oldentry.Path == entry.Path && oldentry.ContentType != ManifestType) { mt.entries[b] = entry - return + return nil } cpl := 0 @@ -282,12 +304,12 @@ func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) { if (oldentry.ContentType == ManifestType) && (cpl == len(oldentry.Path)) { if mt.loadSubTrie(oldentry, quitC) != nil { - return + return nil } entry.Path = entry.Path[cpl:] oldentry.subtrie.addEntry(entry, quitC) oldentry.Hash = "" - return + return nil } commonPrefix := entry.Path[:cpl] @@ -305,10 +327,11 @@ func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) { Path: commonPrefix, ContentType: ManifestType, }, subtrie) + return nil } func (mt *manifestTrie) getCountLast() (cnt int, entry *manifestTrieEntry) { - for _, e := range mt.entries { + for _, e := range &mt.entries { if e != nil { cnt++ entry = e @@ -362,7 +385,7 @@ func (mt *manifestTrie) recalcAndStore() error { buffer.WriteString(`{"entries":[`) list := &Manifest{} - for _, entry := range mt.entries { + for _, entry := range &mt.entries { if entry != nil { if entry.Hash == "" { // TODO: paralellize err := entry.subtrie.recalcAndStore() @@ -382,16 +405,31 @@ func (mt *manifestTrie) recalcAndStore() error { } sr := bytes.NewReader(manifest) - key, wait, err2 := mt.fileStore.Store(sr, int64(len(manifest)), mt.encrypted) - wait() + ctx := context.TODO() + key, wait, err2 := mt.fileStore.Store(ctx, sr, int64(len(manifest)), mt.encrypted) + if err2 != nil { + return err2 + } + err2 = wait(ctx) mt.ref = key return err2 } func (mt *manifestTrie) loadSubTrie(entry *manifestTrieEntry, quitC chan bool) (err error) { + if entry.ManifestEntry.Access != nil { + if mt.decrypt == nil { + return errors.New("dont have decryptor") + } + + err := mt.decrypt(&entry.ManifestEntry) + if err != nil { + return err + } + } + if entry.subtrie == nil { hash := common.Hex2Bytes(entry.Hash) - entry.subtrie, err = loadManifest(mt.fileStore, hash, quitC) + entry.subtrie, err = loadManifest(context.TODO(), mt.fileStore, hash, quitC, mt.decrypt) entry.Hash = "" // might not match, should be recalculated } return diff --git a/swarm/api/manifest_test.go b/swarm/api/manifest_test.go index d65f023f85cc..1c8e53c43308 100644 --- a/swarm/api/manifest_test.go +++ b/swarm/api/manifest_test.go @@ -44,7 +44,7 @@ func testGetEntry(t *testing.T, path, match string, multiple bool, paths ...stri quitC := make(chan bool) fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams()) ref := make([]byte, fileStore.HashSize()) - trie, err := readManifest(manifest(paths...), ref, fileStore, false, quitC) + trie, err := readManifest(manifest(paths...), ref, fileStore, false, quitC, NOOPDecrypt) if err != nil { t.Errorf("unexpected error making manifest: %v", err) } @@ -101,7 +101,7 @@ func TestExactMatch(t *testing.T) { mf := manifest("shouldBeExactMatch.css", "shouldBeExactMatch.css.map") fileStore := 
storage.NewFileStore(nil, storage.NewFileStoreParams()) ref := make([]byte, fileStore.HashSize()) - trie, err := readManifest(mf, ref, fileStore, false, quitC) + trie, err := readManifest(mf, ref, fileStore, false, quitC, nil) if err != nil { t.Errorf("unexpected error making manifest: %v", err) } @@ -134,7 +134,7 @@ func TestAddFileWithManifestPath(t *testing.T) { } fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams()) ref := make([]byte, fileStore.HashSize()) - trie, err := readManifest(reader, ref, fileStore, false, nil) + trie, err := readManifest(reader, ref, fileStore, false, nil, NOOPDecrypt) if err != nil { t.Fatal(err) } @@ -161,7 +161,7 @@ func TestReadManifestOverSizeLimit(t *testing.T) { reader := &storage.LazyTestSectionReader{ SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))), } - _, err := readManifest(reader, storage.Address{}, nil, false, nil) + _, err := readManifest(reader, storage.Address{}, nil, false, nil, NOOPDecrypt) if err == nil { t.Fatal("got no error from readManifest") } diff --git a/swarm/api/storage.go b/swarm/api/storage.go index 6ab4af6c4b20..8a48fe5bc004 100644 --- a/swarm/api/storage.go +++ b/swarm/api/storage.go @@ -17,6 +17,7 @@ package api import ( + "context" "path" "github.com/ethereum/go-ethereum/swarm/storage" @@ -45,8 +46,8 @@ func NewStorage(api *API) *Storage { // its content type // // DEPRECATED: Use the HTTP API instead -func (s *Storage) Put(content, contentType string, toEncrypt bool) (storage.Address, func(), error) { - return s.api.Put(content, contentType, toEncrypt) +func (s *Storage) Put(ctx context.Context, content string, contentType string, toEncrypt bool) (storage.Address, func(context.Context) error, error) { + return s.api.Put(ctx, content, contentType, toEncrypt) } // Get retrieves the content from bzzpath and reads the response in full @@ -57,21 +58,21 @@ func (s *Storage) Put(content, contentType string, toEncrypt bool) (storage.Addr // size is resp.Size // // DEPRECATED: Use the HTTP API instead -func (s *Storage) Get(bzzpath string) (*Response, error) { +func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) { uri, err := Parse(path.Join("bzz:/", bzzpath)) if err != nil { return nil, err } - addr, err := s.api.Resolve(uri) + addr, err := s.api.Resolve(ctx, uri.Addr) if err != nil { return nil, err } - reader, mimeType, status, _, err := s.api.Get(addr, uri.Path) + reader, mimeType, status, _, err := s.api.Get(ctx, nil, addr, uri.Path) if err != nil { return nil, err } quitC := make(chan bool) - expsize, err := reader.Size(quitC) + expsize, err := reader.Size(ctx, quitC) if err != nil { return nil, err } @@ -87,16 +88,16 @@ func (s *Storage) Get(bzzpath string) (*Response, error) { // and merge on to it. 
creating an entry w conentType (mime) // // DEPRECATED: Use the HTTP API instead -func (s *Storage) Modify(rootHash, path, contentHash, contentType string) (newRootHash string, err error) { +func (s *Storage) Modify(ctx context.Context, rootHash, path, contentHash, contentType string) (newRootHash string, err error) { uri, err := Parse("bzz:/" + rootHash) if err != nil { return "", err } - addr, err := s.api.Resolve(uri) + addr, err := s.api.Resolve(ctx, uri.Addr) if err != nil { return "", err } - addr, err = s.api.Modify(addr, path, contentHash, contentType) + addr, err = s.api.Modify(ctx, addr, path, contentHash, contentType) if err != nil { return "", err } diff --git a/swarm/api/storage_test.go b/swarm/api/storage_test.go index 9d23e8f13699..ef96972b68a6 100644 --- a/swarm/api/storage_test.go +++ b/swarm/api/storage_test.go @@ -17,6 +17,7 @@ package api import ( + "context" "testing" ) @@ -31,18 +32,22 @@ func TestStoragePutGet(t *testing.T) { content := "hello" exp := expResponse(content, "text/plain", 0) // exp := expResponse([]byte(content), "text/plain", 0) - bzzkey, wait, err := api.Put(content, exp.MimeType, toEncrypt) + ctx := context.TODO() + bzzkey, wait, err := api.Put(ctx, content, exp.MimeType, toEncrypt) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + err = wait(ctx) if err != nil { t.Fatalf("unexpected error: %v", err) } - wait() bzzhash := bzzkey.Hex() // to check put against the API#Get resp0 := testGet(t, api.api, bzzhash, "") checkResponse(t, resp0, exp) // check storage#Get - resp, err := api.Get(bzzhash) + resp, err := api.Get(context.TODO(), bzzhash) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/swarm/api/uri.go b/swarm/api/uri.go index 14965e0d9343..808517088a54 100644 --- a/swarm/api/uri.go +++ b/swarm/api/uri.go @@ -53,6 +53,19 @@ type URI struct { Path string } +func (u *URI) MarshalJSON() (out []byte, err error) { + return []byte(`"` + u.String() + `"`), nil +} + +func (u *URI) UnmarshalJSON(value []byte) error { + uri, err := Parse(string(value)) + if err != nil { + return err + } + *u = *uri + return nil +} + // Parse parses rawuri into a URI struct, where rawuri is expected to have one // of the following formats: // diff --git a/swarm/bmt/bmt.go b/swarm/bmt/bmt.go index 71aee24955aa..a85d4369e5b5 100644 --- a/swarm/bmt/bmt.go +++ b/swarm/bmt/bmt.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// Package bmt provides a binary merkle tree implementation +// Package bmt provides a binary merkle tree implementation used for swarm chunk hash package bmt import ( @@ -26,16 +26,16 @@ import ( ) /* -Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size +Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size. It is defined as the root hash of the binary merkle tree built over fixed size segments -of the underlying chunk using any base hash function (e.g keccak 256 SHA3). -Chunk with data shorter than the fixed size are hashed as if they had zero padding +of the underlying chunk using any base hash function (e.g., keccak 256 SHA3). +Chunks with data shorter than the fixed size are hashed as if they had zero padding. BMT hash is used as the chunk hash function in swarm which in turn is the basis for the 128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash The BMT is optimal for providing compact inclusion proofs, i.e. 
prove that a -segment is a substring of a chunk starting at a particular offset +segment is a substring of a chunk starting at a particular offset. The size of the underlying segments is fixed to the size of the base hash (called the resolution of the BMT hash), Using Keccak256 SHA3 hash is 32 bytes, the EVM word size to optimize for on-chain BMT verification as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash. @@ -46,17 +46,15 @@ Two implementations are provided: that is simple to understand * Hasher is optimized for speed taking advantage of concurrency with minimalistic control structure to coordinate the concurrent routines - It implements the following interfaces - * standard golang hash.Hash - * SwarmHash - * io.Writer - * TODO: SegmentWriter + + BMT Hasher implements the following interfaces + * standard golang hash.Hash - synchronous, reusable + * SwarmHash - SumWithSpan provided + * io.Writer - synchronous left-to-right datawriter + * AsyncWriter - concurrent section writes and asynchronous Sum call */ const ( - // SegmentCount is the maximum number of segments of the underlying chunk - // Should be equal to max-chunk-data-size / hash-size - SegmentCount = 128 // PoolSize is the maximum number of bmt trees used by the hashers, i.e, // the maximum number of concurrent BMT hashing operations performed by the same hasher PoolSize = 8 @@ -69,7 +67,7 @@ type BaseHasherFunc func() hash.Hash // Hasher a reusable hasher for fixed maximum size chunks representing a BMT // - implements the hash.Hash interface // - reuses a pool of trees for amortised memory allocation and resource control -// - supports order-agnostic concurrent segment writes (TODO:) +// - supports order-agnostic concurrent segment writes and section (double segment) writes // as well as sequential read and write // - the same hasher instance must not be called concurrently on more than one chunk // - the same hasher instance is synchronously reuseable @@ -81,8 +79,7 @@ type Hasher struct { bmt *tree // prebuilt BMT resource for flowcontrol and proofs } -// New creates a reusable Hasher -// implements the hash.Hash interface +// New creates a reusable BMT Hasher that // pulls a new tree from a resource pool for hashing each chunk func New(p *TreePool) *Hasher { return &Hasher{ @@ -90,9 +87,9 @@ func New(p *TreePool) *Hasher { } } -// TreePool provides a pool of trees used as resources by Hasher -// a tree popped from the pool is guaranteed to have clean state -// for hashing a new chunk +// TreePool provides a pool of trees used as resources by the BMT Hasher. +// A tree popped from the pool is guaranteed to have a clean state ready +// for hashing a new chunk. 
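
Editor's note (illustrative only, not part of the patch): the reworked Hasher is driven the same way the tests at the bottom of this diff drive it — reserve trees through a TreePool, reset the hasher with the span of the data, write the chunk data sequentially, then call Sum. A minimal sketch follows; the import paths and the 8-byte little-endian span encoding are assumptions, while NewTreePool, New, ResetWithLength, Write and Sum are the APIs that appear in this patch.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/swarm/bmt"
)

func main() {
	// a pool of BMT trees over Keccak256 with 128 segments per chunk (cf. segmentCount in bmt_test.go)
	pool := bmt.NewTreePool(sha3.NewKeccak256, 128, bmt.PoolSize)
	defer pool.Drain(0)

	h := bmt.New(pool)
	data := []byte("hello bmt")

	// span of the data subsumed under the hash (assumed: 8-byte little-endian length)
	span := make([]byte, 8)
	binary.LittleEndian.PutUint64(span, uint64(len(data)))

	h.ResetWithLength(span) // SwarmHash interface: reset and record the span
	h.Write(data)           // io.Writer interface: full sections are pushed to the tree concurrently
	fmt.Printf("bmt root: %x\n", h.Sum(nil))
}
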
type TreePool struct { lock sync.Mutex c chan *tree // the channel to obtain a resource from the pool @@ -101,7 +98,7 @@ type TreePool struct { SegmentCount int // the number of segments on the base level of the BMT Capacity int // pool capacity, controls concurrency Depth int // depth of the bmt trees = int(log2(segmentCount))+1 - Datalength int // the total length of the data (count * size) + Size int // the total length of the data (count * size) count int // current count of (ever) allocated resources zerohashes [][]byte // lookup table for predictable padding subtrees for all levels } @@ -112,15 +109,12 @@ func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool { // initialises the zerohashes lookup table depth := calculateDepthFor(segmentCount) segmentSize := hasher().Size() - zerohashes := make([][]byte, depth) + zerohashes := make([][]byte, depth+1) zeros := make([]byte, segmentSize) zerohashes[0] = zeros h := hasher() - for i := 1; i < depth; i++ { - h.Reset() - h.Write(zeros) - h.Write(zeros) - zeros = h.Sum(nil) + for i := 1; i < depth+1; i++ { + zeros = doSum(h, nil, zeros, zeros) zerohashes[i] = zeros } return &TreePool{ @@ -129,7 +123,7 @@ func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool { SegmentSize: segmentSize, SegmentCount: segmentCount, Capacity: capacity, - Datalength: segmentCount * segmentSize, + Size: segmentCount * segmentSize, Depth: depth, zerohashes: zerohashes, } @@ -158,7 +152,7 @@ func (p *TreePool) reserve() *tree { select { case t = <-p.c: default: - t = newTree(p.SegmentSize, p.Depth) + t = newTree(p.SegmentSize, p.Depth, p.hasher) p.count++ } return t @@ -176,29 +170,28 @@ func (p *TreePool) release(t *tree) { // the tree is 'locked' while not in the pool type tree struct { leaves []*node // leaf nodes of the tree, other nodes accessible via parent links - cur int // index of rightmost currently open segment + cursor int // index of rightmost currently open segment offset int // offset (cursor position) within currently open segment - segment []byte // the rightmost open segment (not complete) section []byte // the rightmost open section (double segment) - depth int // number of levels result chan []byte // result channel - hash []byte // to record the result span []byte // The span of the data subsumed under the chunk } // node is a reuseable segment hasher representing a node in a BMT type node struct { - isLeft bool // whether it is left side of the parent double segment - parent *node // pointer to parent node in the BMT - state int32 // atomic increment impl concurrent boolean toggle - left, right []byte // this is where the content segment is set + isLeft bool // whether it is left side of the parent double segment + parent *node // pointer to parent node in the BMT + state int32 // atomic increment impl concurrent boolean toggle + left, right []byte // this is where the two children sections are written + hasher hash.Hash // preconstructed hasher on nodes } // newNode constructs a segment hasher node in the BMT (used by newTree) -func newNode(index int, parent *node) *node { +func newNode(index int, parent *node, hasher hash.Hash) *node { return &node{ parent: parent, isLeft: index%2 == 0, + hasher: hasher, } } @@ -256,16 +249,21 @@ func (t *tree) draw(hash []byte) string { // newTree initialises a tree by building up the nodes of a BMT // - segment size is stipulated to be the size of the hash -func newTree(segmentSize, depth int) *tree { - n := newNode(0, nil) +func newTree(segmentSize, depth 
int, hashfunc func() hash.Hash) *tree { + n := newNode(0, nil, hashfunc()) prevlevel := []*node{n} // iterate over levels and creates 2^(depth-level) nodes + // the 0 level is on double segment sections so we start at depth - 2 since count := 2 for level := depth - 2; level >= 0; level-- { nodes := make([]*node, count) for i := 0; i < count; i++ { parent := prevlevel[i/2] - nodes[i] = newNode(i, parent) + var hasher hash.Hash + if level == 0 { + hasher = hashfunc() + } + nodes[i] = newNode(i, parent, hasher) } prevlevel = nodes count *= 2 @@ -273,13 +271,12 @@ func newTree(segmentSize, depth int) *tree { // the datanode level is the nodes on the last level return &tree{ leaves: prevlevel, - result: make(chan []byte, 1), - segment: make([]byte, segmentSize), + result: make(chan []byte), section: make([]byte, 2*segmentSize), } } -// methods needed by hash.Hash +// methods needed to implement hash.Hash // Size returns the size func (h *Hasher) Size() int { @@ -288,227 +285,367 @@ func (h *Hasher) Size() int { // BlockSize returns the block size func (h *Hasher) BlockSize() int { - return h.pool.SegmentSize -} - -// Hash hashes the data and the span using the bmt hasher -func Hash(h *Hasher, span, data []byte) []byte { - h.ResetWithLength(span) - h.Write(data) - return h.Sum(nil) -} - -// Datalength returns the maximum data size that is hashed by the hasher = -// segment count times segment size -func (h *Hasher) DataLength() int { - return h.pool.Datalength + return 2 * h.pool.SegmentSize } -// Sum returns the hash of the buffer +// Sum returns the BMT root hash of the buffer +// using Sum presupposes sequential synchronous writes (io.Writer interface) // hash.Hash interface Sum method appends the byte slice to the underlying // data before it calculates and returns the hash of the chunk // caller must make sure Sum is not called concurrently with Write, writeSection -// and WriteSegment (TODO:) -func (h *Hasher) Sum(b []byte) (r []byte) { - return h.sum(b, true, true) -} - -// sum implements Sum taking parameters -// * if the tree is released right away -// * if sequential write is used (can read sections) -func (h *Hasher) sum(b []byte, release, section bool) (r []byte) { - t := h.bmt - h.finalise(section) - if t.offset > 0 { // get the last node (double segment) - - // padding the segment with zero - copy(t.segment[t.offset:], h.pool.zerohashes[0]) - } - if section { - if t.cur%2 == 1 { - // if just finished current segment, copy it to the right half of the chunk - copy(t.section[h.pool.SegmentSize:], t.segment) - } else { - // copy segment to front of section, zero pad the right half - copy(t.section, t.segment) - copy(t.section[h.pool.SegmentSize:], h.pool.zerohashes[0]) - } - h.writeSection(t.cur, t.section) - } else { - // TODO: h.writeSegment(t.cur, t.segment) - panic("SegmentWriter not implemented") - } - bmtHash := <-t.result +func (h *Hasher) Sum(b []byte) (s []byte) { + t := h.getTree() + // write the last section with final flag set to true + go h.writeSection(t.cursor, t.section, true, true) + // wait for the result + s = <-t.result span := t.span - - if release { - h.releaseTree() - } - // sha3(span + BMT(pure_chunk)) - if span == nil { - return bmtHash + // release the tree resource back to the pool + h.releaseTree() + // b + sha3(span + BMT(pure_chunk)) + if len(span) == 0 { + return append(b, s...) 
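	// Editor's note (illustrative, not part of the patch): for a non-empty span the final
	// chunk hash computed just below is baseHash(span || BMT root) appended to b, i.e. the
	// "sha3(span + BMT(pure_chunk))" mentioned above; doSum, added further down in this file,
	// simply resets the base hasher, writes its arguments in order and returns hasher.Sum(b).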
} - bh := h.pool.hasher() - bh.Reset() - bh.Write(span) - bh.Write(bmtHash) - return bh.Sum(b) + return doSum(h.pool.hasher(), b, span, s) } -// Hasher implements the SwarmHash interface +// methods needed to implement the SwarmHash and the io.Writer interfaces -// Hasher implements the io.Writer interface - -// Write fills the buffer to hash, -// with every full segment calls writeSection +// Write calls sequentially add to the buffer to be hashed, +// with every full segment calls writeSection in a go routine func (h *Hasher) Write(b []byte) (int, error) { l := len(b) - if l <= 0 { + if l == 0 || l > h.pool.Size { return 0, nil } - t := h.bmt - need := (h.pool.SegmentCount - t.cur) * h.pool.SegmentSize - if l < need { - need = l - } - // calculate missing bit to complete current open segment - rest := h.pool.SegmentSize - t.offset - if need < rest { - rest = need - } - copy(t.segment[t.offset:], b[:rest]) - need -= rest - size := (t.offset + rest) % h.pool.SegmentSize - // read full segments and the last possibly partial segment - for need > 0 { - // push all finished chunks we read - if t.cur%2 == 0 { - copy(t.section, t.segment) - } else { - copy(t.section[h.pool.SegmentSize:], t.segment) - h.writeSection(t.cur, t.section) + t := h.getTree() + secsize := 2 * h.pool.SegmentSize + // calculate length of missing bit to complete current open section + smax := secsize - t.offset + // if at the beginning of chunk or middle of the section + if t.offset < secsize { + // fill up current segment from buffer + copy(t.section[t.offset:], b) + // if input buffer consumed and open section not complete, then + // advance offset and return + if smax == 0 { + smax = secsize } - size = h.pool.SegmentSize - if need < size { - size = need + if l <= smax { + t.offset += l + return l, nil + } + } else { + // if end of a section + if t.cursor == h.pool.SegmentCount*2 { + return 0, nil } - copy(t.segment, b[rest:rest+size]) - need -= size - rest += size - t.cur++ } - t.offset = size % h.pool.SegmentSize + // read full sections and the last possibly partial section from the input buffer + for smax < l { + // section complete; push to tree asynchronously + go h.writeSection(t.cursor, t.section, true, false) + // reset section + t.section = make([]byte, secsize) + // copy from input buffer at smax to right half of section + copy(t.section, b[smax:]) + // advance cursor + t.cursor++ + // smax here represents successive offsets in the input buffer + smax += secsize + } + t.offset = l - smax + secsize return l, nil } // Reset needs to be called before writing to the hasher func (h *Hasher) Reset() { - h.getTree() + h.releaseTree() } -// Hasher implements the SwarmHash interface +// methods needed to implement the SwarmHash interface // ResetWithLength needs to be called before writing to the hasher // the argument is supposed to be the byte slice binary representation of // the length of the data subsumed under the hash, i.e., span func (h *Hasher) ResetWithLength(span []byte) { h.Reset() - h.bmt.span = span + h.getTree().span = span } // releaseTree gives back the Tree to the pool whereby it unlocks // it resets tree, segment and index func (h *Hasher) releaseTree() { t := h.bmt - if t != nil { - t.cur = 0 + if t == nil { + return + } + h.bmt = nil + go func() { + t.cursor = 0 t.offset = 0 t.span = nil - t.hash = nil - h.bmt = nil + t.section = make([]byte, h.pool.SegmentSize*2) + select { + case <-t.result: + default: + } h.pool.release(t) + }() +} + +// NewAsyncWriter extends Hasher with an interface for 
concurrent segment/section writes +func (h *Hasher) NewAsyncWriter(double bool) *AsyncHasher { + secsize := h.pool.SegmentSize + if double { + secsize *= 2 + } + write := func(i int, section []byte, final bool) { + h.writeSection(i, section, double, final) + } + return &AsyncHasher{ + Hasher: h, + double: double, + secsize: secsize, + write: write, } } -// TODO: writeSegment writes the ith segment into the BMT tree -// func (h *Hasher) writeSegment(i int, s []byte) { -// go h.run(h.bmt.leaves[i/2], h.pool.hasher(), i%2 == 0, s) -// } +// SectionWriter is an asynchronous segment/section writer interface +type SectionWriter interface { + Reset() // standard init to be called before reuse + Write(index int, data []byte) // write into section of index + Sum(b []byte, length int, span []byte) []byte // returns the hash of the buffer + SectionSize() int // size of the async section unit to use +} -// writeSection writes the hash of i/2-th segction into right level 1 node of the BMT tree -func (h *Hasher) writeSection(i int, section []byte) { - n := h.bmt.leaves[i/2] - isLeft := n.isLeft - n = n.parent - bh := h.pool.hasher() - bh.Write(section) - go func() { - sum := bh.Sum(nil) - if n == nil { - h.bmt.result <- sum +// AsyncHasher extends BMT Hasher with an asynchronous segment/section writer interface +// AsyncHasher is unsafe and does not check indexes and section data lengths +// it must be used with the right indexes and length and the right number of sections +// +// behaviour is undefined if +// * non-final sections are shorter or longer than secsize +// * if final section does not match length +// * write a section with index that is higher than length/secsize +// * set length in Sum call when length/secsize < maxsec +// +// * if Sum() is not called on a Hasher that is fully written +// a process will block, can be terminated with Reset +// * it will not leak processes if not all sections are written but it blocks +// and keeps the resource which can be released calling Reset() +type AsyncHasher struct { + *Hasher // extends the Hasher + mtx sync.Mutex // to lock the cursor access + double bool // whether to use double segments (call Hasher.writeSection) + secsize int // size of base section (size of hash or double) + write func(i int, section []byte, final bool) +} + +// methods needed to implement AsyncWriter + +// SectionSize returns the size of async section unit to use +func (sw *AsyncHasher) SectionSize() int { + return sw.secsize +} + +// Write writes the i-th section of the BMT base +// this function can and is meant to be called concurrently +// it sets max segment threadsafely +func (sw *AsyncHasher) Write(i int, section []byte) { + sw.mtx.Lock() + defer sw.mtx.Unlock() + t := sw.getTree() + // cursor keeps track of the rightmost section written so far + // if index is lower than cursor then just write non-final section as is + if i < t.cursor { + // if index is not the rightmost, safe to write section + go sw.write(i, section, false) + return + } + // if there is a previous rightmost section safe to write section + if t.offset > 0 { + if i == t.cursor { + // i==cursor implies cursor was set by Hash call so we can write section as final one + // since it can be shorter, first we copy it to the padded buffer + t.section = make([]byte, sw.secsize) + copy(t.section, section) + go sw.write(i, t.section, true) return } - h.run(n, bh, isLeft, sum) - }() + // the rightmost section just changed, so we write the previous one as non-final + go sw.write(t.cursor, t.section, false) + } + 
// set i as the index of the righmost section written so far + // set t.offset to cursor*secsize+1 + t.cursor = i + t.offset = i*sw.secsize + 1 + t.section = make([]byte, sw.secsize) + copy(t.section, section) } -// run pushes the data to the node -// if it is the first of 2 sisters written the routine returns +// Sum can be called any time once the length and the span is known +// potentially even before all segments have been written +// in such cases Sum will block until all segments are present and +// the hash for the length can be calculated. +// +// b: digest is appended to b +// length: known length of the input (unsafe; undefined if out of range) +// meta: metadata to hash together with BMT root for the final digest +// e.g., span for protection against existential forgery +func (sw *AsyncHasher) Sum(b []byte, length int, meta []byte) (s []byte) { + sw.mtx.Lock() + t := sw.getTree() + if length == 0 { + sw.mtx.Unlock() + s = sw.pool.zerohashes[sw.pool.Depth] + } else { + // for non-zero input the rightmost section is written to the tree asynchronously + // if the actual last section has been written (t.cursor == length/t.secsize) + maxsec := (length - 1) / sw.secsize + if t.offset > 0 { + go sw.write(t.cursor, t.section, maxsec == t.cursor) + } + // set cursor to maxsec so final section is written when it arrives + t.cursor = maxsec + t.offset = length + result := t.result + sw.mtx.Unlock() + // wait for the result or reset + s = <-result + } + // relesase the tree back to the pool + sw.releaseTree() + // if no meta is given just append digest to b + if len(meta) == 0 { + return append(b, s...) + } + // hash together meta and BMT root hash using the pools + return doSum(sw.pool.hasher(), b, meta, s) +} + +// writeSection writes the hash of i-th section into level 1 node of the BMT tree +func (h *Hasher) writeSection(i int, section []byte, double bool, final bool) { + // select the leaf node for the section + var n *node + var isLeft bool + var hasher hash.Hash + var level int + t := h.getTree() + if double { + level++ + n = t.leaves[i] + hasher = n.hasher + isLeft = n.isLeft + n = n.parent + // hash the section + section = doSum(hasher, nil, section) + } else { + n = t.leaves[i/2] + hasher = n.hasher + isLeft = i%2 == 0 + } + // write hash into parent node + if final { + // for the last segment use writeFinalNode + h.writeFinalNode(level, n, hasher, isLeft, section) + } else { + h.writeNode(n, hasher, isLeft, section) + } +} + +// writeNode pushes the data to the node +// if it is the first of 2 sisters written, the routine terminates // if it is the second, it calculates the hash and writes it // to the parent node recursively -func (h *Hasher) run(n *node, bh hash.Hash, isLeft bool, s []byte) { +// since hashing the parent is synchronous the same hasher can be used +func (h *Hasher) writeNode(n *node, bh hash.Hash, isLeft bool, s []byte) { + level := 1 for { + // at the root of the bmt just write the result to the result channel + if n == nil { + h.getTree().result <- s + return + } + // otherwise assign child hash to left or right segment if isLeft { n.left = s } else { n.right = s } - // the child-thread first arriving will quit + // the child-thread first arriving will terminate if n.toggle() { return } - // the second thread now can be sure both left and right children are written - // it calculates the hash of left|right and take it to the next level - bh.Reset() - bh.Write(n.left) - bh.Write(n.right) - s = bh.Sum(nil) - - // at the root of the bmt just write the result to 
the result channel - if n.parent == nil { - h.bmt.result <- s - return - } - - // otherwise iterate on parent + // the thread coming second now can be sure both left and right children are written + // so it calculates the hash of left|right and pushes it to the parent + s = doSum(bh, nil, n.left, n.right) isLeft = n.isLeft n = n.parent + level++ } } -// finalise is following the path starting from the final datasegment to the +// writeFinalNode is following the path starting from the final datasegment to the // BMT root via parents // for unbalanced trees it fills in the missing right sister nodes using // the pool's lookup table for BMT subtree root hashes for all-zero sections -func (h *Hasher) finalise(skip bool) { - t := h.bmt - isLeft := t.cur%2 == 0 - n := t.leaves[t.cur/2] - for level := 0; n != nil; level++ { - // when the final segment's path is going via left child node - // we include an all-zero subtree hash for the right level and toggle the node. - // when the path is going through right child node, nothing to do - if isLeft && !skip { +// otherwise behaves like `writeNode` +func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s []byte) { + + for { + // at the root of the bmt just write the result to the result channel + if n == nil { + if s != nil { + h.getTree().result <- s + } + return + } + var noHash bool + if isLeft { + // coming from left sister branch + // when the final section's path is going via left child node + // we include an all-zero subtree hash for the right level and toggle the node. n.right = h.pool.zerohashes[level] - n.toggle() + if s != nil { + n.left = s + // if a left final node carries a hash, it must be the first (and only thread) + // so the toggle is already in passive state no need no call + // yet thread needs to carry on pushing hash to parent + noHash = false + } else { + // if again first thread then propagate nil and calculate no hash + noHash = n.toggle() + } + } else { + // right sister branch + if s != nil { + // if hash was pushed from right child node, write right segment change state + n.right = s + // if toggle is true, we arrived first so no hashing just push nil to parent + noHash = n.toggle() + + } else { + // if s is nil, then thread arrived first at previous node and here there will be two, + // so no need to do anything and keep s = nil for parent + noHash = true + } } - skip = false + // the child-thread first arriving will just continue resetting s to nil + // the second thread now can be sure both left and right children are written + // it calculates the hash of left|right and pushes it to the parent + if noHash { + s = nil + } else { + s = doSum(bh, nil, n.left, n.right) + } + // iterate to parent isLeft = n.isLeft n = n.parent + level++ } } -// getTree obtains a BMT resource by reserving one from the pool +// getTree obtains a BMT resource by reserving one from the pool and assigns it to the bmt field func (h *Hasher) getTree() *tree { if h.bmt != nil { return h.bmt @@ -525,6 +662,16 @@ func (n *node) toggle() bool { return atomic.AddInt32(&n.state, 1)%2 == 1 } +// calculates the hash of the data using hash.Hash +func doSum(h hash.Hash, b []byte, data ...[]byte) []byte { + h.Reset() + for _, v := range data { + h.Write(v) + } + return h.Sum(b) +} + +// hashstr is a pretty printer for bytes used in tree.draw func hashstr(b []byte) string { end := len(b) if end > 4 { diff --git a/swarm/bmt/bmt_r.go b/swarm/bmt/bmt_r.go index c61d2dc73212..0cb6c146f5d7 100644 --- a/swarm/bmt/bmt_r.go +++ 
b/swarm/bmt/bmt_r.go @@ -80,6 +80,5 @@ func (rh *RefHasher) hash(data []byte, length int) []byte { } rh.hasher.Reset() rh.hasher.Write(section) - s := rh.hasher.Sum(nil) - return s + return rh.hasher.Sum(nil) } diff --git a/swarm/bmt/bmt_test.go b/swarm/bmt/bmt_test.go index e074d90e73dd..760aa11d8b0f 100644 --- a/swarm/bmt/bmt_test.go +++ b/swarm/bmt/bmt_test.go @@ -34,18 +34,24 @@ import ( // the actual data length generated (could be longer than max datalength of the BMT) const BufferSize = 4128 +const ( + // segmentCount is the maximum number of segments of the underlying chunk + // Should be equal to max-chunk-data-size / hash-size + // Currently set to 128 == 4096 (default chunk size) / 32 (sha3.keccak256 size) + segmentCount = 128 +) + +var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65, 111, 127, 128} + +// calculates the Keccak256 SHA3 hash of the data func sha3hash(data ...[]byte) []byte { h := sha3.NewKeccak256() - for _, v := range data { - h.Write(v) - } - return h.Sum(nil) + return doSum(h, nil, data...) } // TestRefHasher tests that the RefHasher computes the expected BMT hash for -// all data lengths between 0 and 256 bytes +// some small data lengths func TestRefHasher(t *testing.T) { - // the test struct is used to specify the expected BMT hash for // segment counts between from and to and lengths from 1 to datalength type test struct { @@ -129,31 +135,87 @@ func TestRefHasher(t *testing.T) { } } -func TestHasherCorrectness(t *testing.T) { - err := testHasher(testBaseHasher) - if err != nil { - t.Fatal(err) +// tests if hasher responds with correct hash comparing the reference implementation return value +func TestHasherEmptyData(t *testing.T) { + hasher := sha3.NewKeccak256 + var data []byte + for _, count := range counts { + t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) { + pool := NewTreePool(hasher, count, PoolSize) + defer pool.Drain(0) + bmt := New(pool) + rbmt := NewRefHasher(hasher, count) + refHash := rbmt.Hash(data) + expHash := syncHash(bmt, nil, data) + if !bytes.Equal(expHash, refHash) { + t.Fatalf("hash mismatch with reference. 
expected %x, got %x", refHash, expHash) + } + }) } } -func testHasher(f func(BaseHasherFunc, []byte, int, int) error) error { +// tests sequential write with entire max size written in one go +func TestSyncHasherCorrectness(t *testing.T) { data := newData(BufferSize) hasher := sha3.NewKeccak256 size := hasher().Size() - counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128} var err error for _, count := range counts { - max := count * size - incr := 1 - for n := 1; n <= max; n += incr { - err = f(hasher, data, n, count) - if err != nil { - return err + t.Run(fmt.Sprintf("segments_%v", count), func(t *testing.T) { + max := count * size + var incr int + capacity := 1 + pool := NewTreePool(hasher, count, capacity) + defer pool.Drain(0) + for n := 0; n <= max; n += incr { + incr = 1 + rand.Intn(5) + bmt := New(pool) + err = testHasherCorrectness(bmt, hasher, data, n, count) + if err != nil { + t.Fatal(err) + } + } + }) + } +} + +// tests order-neutral concurrent writes with entire max size written in one go +func TestAsyncCorrectness(t *testing.T) { + data := newData(BufferSize) + hasher := sha3.NewKeccak256 + size := hasher().Size() + whs := []whenHash{first, last, random} + + for _, double := range []bool{false, true} { + for _, wh := range whs { + for _, count := range counts { + t.Run(fmt.Sprintf("double_%v_hash_when_%v_segments_%v", double, wh, count), func(t *testing.T) { + max := count * size + var incr int + capacity := 1 + pool := NewTreePool(hasher, count, capacity) + defer pool.Drain(0) + for n := 1; n <= max; n += incr { + incr = 1 + rand.Intn(5) + bmt := New(pool) + d := data[:n] + rbmt := NewRefHasher(hasher, count) + exp := rbmt.Hash(d) + got := syncHash(bmt, nil, d) + if !bytes.Equal(got, exp) { + t.Fatalf("wrong sync hash for datalength %v: expected %x (ref), got %x", n, exp, got) + } + sw := bmt.NewAsyncWriter(double) + got = asyncHashRandom(sw, nil, d, wh) + if !bytes.Equal(got, exp) { + t.Fatalf("wrong async hash for datalength %v: expected %x, got %x", n, exp, got) + } + } + }) } } } - return nil } // Tests that the BMT hasher can be synchronously reused with poolsizes 1 and PoolSize @@ -166,26 +228,27 @@ func TestHasherReuse(t *testing.T) { }) } +// tests if bmt reuse is not corrupting result func testHasherReuse(poolsize int, t *testing.T) { hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, SegmentCount, poolsize) + pool := NewTreePool(hasher, segmentCount, poolsize) defer pool.Drain(0) bmt := New(pool) for i := 0; i < 100; i++ { data := newData(BufferSize) - n := rand.Intn(bmt.DataLength()) - err := testHasherCorrectness(bmt, hasher, data, n, SegmentCount) + n := rand.Intn(bmt.Size()) + err := testHasherCorrectness(bmt, hasher, data, n, segmentCount) if err != nil { t.Fatal(err) } } } -// Tests if pool can be cleanly reused even in concurrent use -func TestBMTHasherConcurrentUse(t *testing.T) { +// Tests if pool can be cleanly reused even in concurrent use by several hasher +func TestBMTConcurrentUse(t *testing.T) { hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, SegmentCount, PoolSize) + pool := NewTreePool(hasher, segmentCount, PoolSize) defer pool.Drain(0) cycles := 100 errc := make(chan error) @@ -194,7 +257,7 @@ func TestBMTHasherConcurrentUse(t *testing.T) { go func() { bmt := New(pool) data := newData(BufferSize) - n := rand.Intn(bmt.DataLength()) + n := rand.Intn(bmt.Size()) errc <- testHasherCorrectness(bmt, hasher, data, n, 128) }() } @@ -215,12 +278,69 @@ LOOP: } } -// helper function that creates a tree pool -func testBaseHasher(hasher 
BaseHasherFunc, d []byte, n, count int) error { - pool := NewTreePool(hasher, count, 1) - defer pool.Drain(0) - bmt := New(pool) - return testHasherCorrectness(bmt, hasher, d, n, count) +// Tests BMT Hasher io.Writer interface is working correctly +// even multiple short random write buffers +func TestBMTWriterBuffers(t *testing.T) { + hasher := sha3.NewKeccak256 + + for _, count := range counts { + t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) { + errc := make(chan error) + pool := NewTreePool(hasher, count, PoolSize) + defer pool.Drain(0) + n := count * 32 + bmt := New(pool) + data := newData(n) + rbmt := NewRefHasher(hasher, count) + refHash := rbmt.Hash(data) + expHash := syncHash(bmt, nil, data) + if !bytes.Equal(expHash, refHash) { + t.Fatalf("hash mismatch with reference. expected %x, got %x", refHash, expHash) + } + attempts := 10 + f := func() error { + bmt := New(pool) + bmt.Reset() + var buflen int + for offset := 0; offset < n; offset += buflen { + buflen = rand.Intn(n-offset) + 1 + read, err := bmt.Write(data[offset : offset+buflen]) + if err != nil { + return err + } + if read != buflen { + return fmt.Errorf("incorrect read. expected %v bytes, got %v", buflen, read) + } + } + hash := bmt.Sum(nil) + if !bytes.Equal(hash, expHash) { + return fmt.Errorf("hash mismatch. expected %x, got %x", hash, expHash) + } + return nil + } + + for j := 0; j < attempts; j++ { + go func() { + errc <- f() + }() + } + timeout := time.NewTimer(2 * time.Second) + for { + select { + case err := <-errc: + if err != nil { + t.Fatal(err) + } + attempts-- + if attempts == 0 { + return + } + case <-timeout.C: + t.Fatalf("timeout") + } + } + }) + } } // helper function that compares reference and optimised implementations on @@ -234,57 +354,65 @@ func testHasherCorrectness(bmt *Hasher, hasher BaseHasherFunc, d []byte, n, coun data := d[:n] rbmt := NewRefHasher(hasher, count) exp := sha3hash(span, rbmt.Hash(data)) - got := Hash(bmt, span, data) + got := syncHash(bmt, span, data) if !bytes.Equal(got, exp) { return fmt.Errorf("wrong hash: expected %x, got %x", exp, got) } return err } -func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) } -func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) } -func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) } -func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) } -func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) } -func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) } - -func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) } -func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) } -func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) } -func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) } -func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) } -func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) } - -func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) } -func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) } -func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) } -func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) } -func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) } -func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) } - -func BenchmarkBMTHasher_4k(t *testing.B) { benchmarkBMTHasher(4096, t) } -func 
BenchmarkBMTHasher_2k(t *testing.B) { benchmarkBMTHasher(4096/2, t) } -func BenchmarkBMTHasher_1k(t *testing.B) { benchmarkBMTHasher(4096/4, t) } -func BenchmarkBMTHasher_512b(t *testing.B) { benchmarkBMTHasher(4096/8, t) } -func BenchmarkBMTHasher_256b(t *testing.B) { benchmarkBMTHasher(4096/16, t) } -func BenchmarkBMTHasher_128b(t *testing.B) { benchmarkBMTHasher(4096/32, t) } - -func BenchmarkBMTHasherNoPool_4k(t *testing.B) { benchmarkBMTHasherPool(1, 4096, t) } -func BenchmarkBMTHasherNoPool_2k(t *testing.B) { benchmarkBMTHasherPool(1, 4096/2, t) } -func BenchmarkBMTHasherNoPool_1k(t *testing.B) { benchmarkBMTHasherPool(1, 4096/4, t) } -func BenchmarkBMTHasherNoPool_512b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/8, t) } -func BenchmarkBMTHasherNoPool_256b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/16, t) } -func BenchmarkBMTHasherNoPool_128b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/32, t) } - -func BenchmarkBMTHasherPool_4k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096, t) } -func BenchmarkBMTHasherPool_2k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/2, t) } -func BenchmarkBMTHasherPool_1k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/4, t) } -func BenchmarkBMTHasherPool_512b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/8, t) } -func BenchmarkBMTHasherPool_256b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/16, t) } -func BenchmarkBMTHasherPool_128b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/32, t) } +// +func BenchmarkBMT(t *testing.B) { + for size := 4096; size >= 128; size /= 2 { + t.Run(fmt.Sprintf("%v_size_%v", "SHA3", size), func(t *testing.B) { + benchmarkSHA3(t, size) + }) + t.Run(fmt.Sprintf("%v_size_%v", "Baseline", size), func(t *testing.B) { + benchmarkBMTBaseline(t, size) + }) + t.Run(fmt.Sprintf("%v_size_%v", "REF", size), func(t *testing.B) { + benchmarkRefHasher(t, size) + }) + t.Run(fmt.Sprintf("%v_size_%v", "BMT", size), func(t *testing.B) { + benchmarkBMT(t, size) + }) + } +} + +type whenHash = int + +const ( + first whenHash = iota + last + random +) + +func BenchmarkBMTAsync(t *testing.B) { + whs := []whenHash{first, last, random} + for size := 4096; size >= 128; size /= 2 { + for _, wh := range whs { + for _, double := range []bool{false, true} { + t.Run(fmt.Sprintf("double_%v_hash_when_%v_size_%v", double, wh, size), func(t *testing.B) { + benchmarkBMTAsync(t, size, wh, double) + }) + } + } + } +} + +func BenchmarkPool(t *testing.B) { + caps := []int{1, PoolSize} + for size := 4096; size >= 128; size /= 2 { + for _, c := range caps { + t.Run(fmt.Sprintf("poolsize_%v_size_%v", c, size), func(t *testing.B) { + benchmarkPool(t, c, size) + }) + } + } +} // benchmarks simple sha3 hash on chunks -func benchmarkSHA3(n int, t *testing.B) { +func benchmarkSHA3(t *testing.B, n int) { data := newData(n) hasher := sha3.NewKeccak256 h := hasher() @@ -292,9 +420,7 @@ func benchmarkSHA3(n int, t *testing.B) { t.ReportAllocs() t.ResetTimer() for i := 0; i < t.N; i++ { - h.Reset() - h.Write(data) - h.Sum(nil) + doSum(h, nil, data) } } @@ -303,7 +429,7 @@ func benchmarkSHA3(n int, t *testing.B) { // doing it on n PoolSize each reusing the base hasher // the premise is that this is the minimum computation needed for a BMT // therefore this serves as a theoretical optimum for concurrent implementations -func benchmarkBMTBaseline(n int, t *testing.B) { +func benchmarkBMTBaseline(t *testing.B, n int) { hasher := sha3.NewKeccak256 hashSize := hasher().Size() data := newData(hashSize) @@ -320,9 +446,7 @@ func 
benchmarkBMTBaseline(n int, t *testing.B) { defer wg.Done() h := hasher() for atomic.AddInt32(&i, 1) < count { - h.Reset() - h.Write(data) - h.Sum(nil) + doSum(h, nil, data) } }() } @@ -331,24 +455,42 @@ func benchmarkBMTBaseline(n int, t *testing.B) { } // benchmarks BMT Hasher -func benchmarkBMTHasher(n int, t *testing.B) { +func benchmarkBMT(t *testing.B, n int) { data := newData(n) hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, SegmentCount, PoolSize) + pool := NewTreePool(hasher, segmentCount, PoolSize) + bmt := New(pool) + + t.ReportAllocs() + t.ResetTimer() + for i := 0; i < t.N; i++ { + syncHash(bmt, nil, data) + } +} + +// benchmarks BMT hasher with asynchronous concurrent segment/section writes +func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) { + data := newData(n) + hasher := sha3.NewKeccak256 + pool := NewTreePool(hasher, segmentCount, PoolSize) + bmt := New(pool).NewAsyncWriter(double) + idxs, segments := splitAndShuffle(bmt.SectionSize(), data) + shuffle(len(idxs), func(i int, j int) { + idxs[i], idxs[j] = idxs[j], idxs[i] + }) t.ReportAllocs() t.ResetTimer() for i := 0; i < t.N; i++ { - bmt := New(pool) - Hash(bmt, nil, data) + asyncHash(bmt, nil, n, wh, idxs, segments) } } // benchmarks 100 concurrent bmt hashes with pool capacity -func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) { +func benchmarkPool(t *testing.B, poolsize, n int) { data := newData(n) hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, SegmentCount, poolsize) + pool := NewTreePool(hasher, segmentCount, poolsize) cycles := 100 t.ReportAllocs() @@ -360,7 +502,7 @@ func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) { go func() { defer wg.Done() bmt := New(pool) - Hash(bmt, nil, data) + syncHash(bmt, nil, data) }() } wg.Wait() @@ -368,7 +510,7 @@ func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) { } // benchmarks the reference hasher -func benchmarkRefHasher(n int, t *testing.B) { +func benchmarkRefHasher(t *testing.B, n int) { data := newData(n) hasher := sha3.NewKeccak256 rbmt := NewRefHasher(hasher, 128) @@ -388,3 +530,93 @@ func newData(bufferSize int) []byte { } return data } + +// Hash hashes the data and the span using the bmt hasher +func syncHash(h *Hasher, span, data []byte) []byte { + h.ResetWithLength(span) + h.Write(data) + return h.Sum(nil) +} + +func splitAndShuffle(secsize int, data []byte) (idxs []int, segments [][]byte) { + l := len(data) + n := l / secsize + if l%secsize > 0 { + n++ + } + for i := 0; i < n; i++ { + idxs = append(idxs, i) + end := (i + 1) * secsize + if end > l { + end = l + } + section := data[i*secsize : end] + segments = append(segments, section) + } + shuffle(n, func(i int, j int) { + idxs[i], idxs[j] = idxs[j], idxs[i] + }) + return idxs, segments +} + +// splits the input data performs a random shuffle to mock async section writes +func asyncHashRandom(bmt SectionWriter, span []byte, data []byte, wh whenHash) (s []byte) { + idxs, segments := splitAndShuffle(bmt.SectionSize(), data) + return asyncHash(bmt, span, len(data), wh, idxs, segments) +} + +// mock for async section writes for BMT SectionWriter +// requires a permutation (a random shuffle) of list of all indexes of segments +// and writes them in order to the appropriate section +// the Sum function is called according to the wh parameter (first, last, random [relative to segment writes]) +func asyncHash(bmt SectionWriter, span []byte, l int, wh whenHash, idxs []int, segments [][]byte) (s []byte) { + bmt.Reset() + if l == 0 { + return bmt.Sum(nil, 
l, span) + } + c := make(chan []byte, 1) + hashf := func() { + c <- bmt.Sum(nil, l, span) + } + maxsize := len(idxs) + var r int + if wh == random { + r = rand.Intn(maxsize) + } + for i, idx := range idxs { + bmt.Write(idx, segments[idx]) + if (wh == first || wh == random) && i == r { + go hashf() + } + } + if wh == last { + return bmt.Sum(nil, l, span) + } + return <-c +} + +// this is also in swarm/network_test.go +// shuffle pseudo-randomizes the order of elements. +// n is the number of elements. Shuffle panics if n < 0. +// swap swaps the elements with indexes i and j. +func shuffle(n int, swap func(i, j int)) { + if n < 0 { + panic("invalid argument to Shuffle") + } + + // Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle + // Shuffle really ought not be called with n that doesn't fit in 32 bits. + // Not only will it take a very long time, but with 2³¹! possible permutations, + // there's no way that any PRNG can have a big enough internal state to + // generate even a minuscule percentage of the possible permutations. + // Nevertheless, the right API signature accepts an int n, so handle it as best we can. + i := n - 1 + for ; i > 1<<31-1-1; i-- { + j := int(rand.Int63n(int64(i + 1))) + swap(i, j) + } + for ; i > 0; i-- { + j := int(rand.Int31n(int32(i + 1))) + swap(i, j) + } +} diff --git a/swarm/chunk/chunk.go b/swarm/chunk/chunk.go new file mode 100644 index 000000000000..1449efccd0ef --- /dev/null +++ b/swarm/chunk/chunk.go @@ -0,0 +1,5 @@ +package chunk + +const ( + DefaultSize = 4096 +) diff --git a/swarm/fuse/fuse_file.go b/swarm/fuse/fuse_file.go index 80c26fe05fcd..ca04f737e2a7 100644 --- a/swarm/fuse/fuse_file.go +++ b/swarm/fuse/fuse_file.go @@ -84,9 +84,9 @@ func (sf *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error { a.Gid = uint32(os.Getegid()) if sf.fileSize == -1 { - reader, _ := sf.mountInfo.swarmApi.Retrieve(sf.addr) + reader, _ := sf.mountInfo.swarmApi.Retrieve(ctx, sf.addr) quitC := make(chan bool) - size, err := reader.Size(quitC) + size, err := reader.Size(ctx, quitC) if err != nil { log.Error("Couldnt get size of file %s : %v", sf.path, err) return err @@ -104,7 +104,7 @@ func (sf *SwarmFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse sf.lock.RLock() defer sf.lock.RUnlock() if sf.reader == nil { - sf.reader, _ = sf.mountInfo.swarmApi.Retrieve(sf.addr) + sf.reader, _ = sf.mountInfo.swarmApi.Retrieve(ctx, sf.addr) } buf := make([]byte, req.Size) n, err := sf.reader.ReadAt(buf, req.Offset) diff --git a/swarm/fuse/swarmfs_test.go b/swarm/fuse/swarmfs_test.go index ed2021c4e0ee..6efeb78d9bd5 100644 --- a/swarm/fuse/swarmfs_test.go +++ b/swarm/fuse/swarmfs_test.go @@ -20,6 +20,7 @@ package fuse import ( "bytes" + "context" "crypto/rand" "flag" "fmt" @@ -110,7 +111,7 @@ func createTestFilesAndUploadToSwarm(t *testing.T, api *api.API, files map[strin } //upload directory to swarm and return hash - bzzhash, err := api.Upload(uploadDir, "", toEncrypt) + bzzhash, err := api.Upload(context.TODO(), uploadDir, "", toEncrypt) if err != nil { t.Fatalf("Error uploading directory %v: %vm encryption: %v", uploadDir, err, toEncrypt) } @@ -1649,7 +1650,7 @@ func TestFUSE(t *testing.T) { if err != nil { t.Fatal(err) } - ta := &testAPI{api: api.NewAPI(fileStore, nil, nil)} + ta := &testAPI{api: api.NewAPI(fileStore, nil, nil, nil)} //run a short suite of tests //approx time: 28s diff --git a/swarm/fuse/swarmfs_unix.go b/swarm/fuse/swarmfs_unix.go index 74dd84a90351..9ff55cc324f7 100644 --- a/swarm/fuse/swarmfs_unix.go +++ 
b/swarm/fuse/swarmfs_unix.go @@ -19,6 +19,7 @@ package fuse import ( + "context" "errors" "fmt" "os" @@ -104,7 +105,7 @@ func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) { } log.Trace("swarmfs mount: getting manifest tree") - _, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(mhash, true) + _, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(context.TODO(), mhash, true) if err != nil { return nil, err } @@ -119,6 +120,10 @@ func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) { log.Trace("swarmfs mount: traversing manifest map") for suffix, entry := range manifestEntryMap { + if suffix == "" { //empty suffix means that the file has no name - i.e. this is the default entry in a manifest. Since we cannot have files without a name, let us ignore this entry + log.Warn("Manifest has an empty-path (default) entry which will be ignored in FUSE mount.") + continue + } addr := common.Hex2Bytes(entry.Hash) fullpath := "/" + suffix basepath := filepath.Dir(fullpath) diff --git a/swarm/fuse/swarmfs_util.go b/swarm/fuse/swarmfs_util.go index 9bbb0f6ac0e7..4f2e1416b615 100644 --- a/swarm/fuse/swarmfs_util.go +++ b/swarm/fuse/swarmfs_util.go @@ -47,7 +47,7 @@ func externalUnmount(mountPoint string) error { } func addFileToSwarm(sf *SwarmFile, content []byte, size int) error { - fkey, mhash, err := sf.mountInfo.swarmApi.AddFile(sf.mountInfo.LatestManifest, sf.path, sf.name, content, true) + fkey, mhash, err := sf.mountInfo.swarmApi.AddFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, content, true) if err != nil { return err } @@ -66,7 +66,7 @@ func addFileToSwarm(sf *SwarmFile, content []byte, size int) error { } func removeFileFromSwarm(sf *SwarmFile) error { - mkey, err := sf.mountInfo.swarmApi.RemoveFile(sf.mountInfo.LatestManifest, sf.path, sf.name, true) + mkey, err := sf.mountInfo.swarmApi.RemoveFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, true) if err != nil { return err } @@ -102,7 +102,7 @@ func removeDirectoryFromSwarm(sd *SwarmDir) error { } func appendToExistingFileInSwarm(sf *SwarmFile, content []byte, offset int64, length int64) error { - fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.addr, offset, length, true) + fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.addr, offset, length, true) if err != nil { return err } diff --git a/swarm/metrics/flags.go b/swarm/metrics/flags.go index 795fc402ff08..79490fd36012 100644 --- a/swarm/metrics/flags.go +++ b/swarm/metrics/flags.go @@ -81,6 +81,9 @@ func Setup(ctx *cli.Context) { hosttag = ctx.GlobalString(metricsInfluxDBHostTagFlag.Name) ) + // Start system runtime metrics collection + go gethmetrics.CollectProcessMetrics(2 * time.Second) + if enableExport { log.Info("Enabling swarm metrics export to InfluxDB") go influxdb.InfluxDBWithTags(gethmetrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "swarm.", map[string]string{ diff --git a/swarm/network/discovery.go b/swarm/network/discovery.go index c0868410c772..55bf7c03327b 100644 --- a/swarm/network/discovery.go +++ b/swarm/network/discovery.go @@ -17,6 +17,7 @@ package network import ( + "context" "fmt" "sync" @@ -48,7 +49,7 @@ func newDiscovery(p *BzzPeer, o Overlay) *discPeer { } // HandleMsg is the message handler that delegates incoming messages -func (d *discPeer) HandleMsg(msg 
interface{}) error { +func (d *discPeer) HandleMsg(ctx context.Context, msg interface{}) error { switch msg := msg.(type) { case *peersMsg: @@ -99,14 +100,14 @@ func (d *discPeer) NotifyPeer(a OverlayAddr, po uint8) { resp := &peersMsg{ Peers: []*BzzAddr{ToAddr(a)}, } - go d.Send(resp) + go d.Send(context.TODO(), resp) } // NotifyDepth sends a subPeers Msg to the receiver notifying them about // a change in the depth of saturation func (d *discPeer) NotifyDepth(po uint8) { // log.Trace(fmt.Sprintf("%08x peer %08x notified of new depth %v", d.localAddr.Over()[:4], d.Address()[:4], po)) - go d.Send(&subPeersMsg{Depth: po}) + go d.Send(context.TODO(), &subPeersMsg{Depth: po}) } /* @@ -178,7 +179,7 @@ func (d *discPeer) handleSubPeersMsg(msg *subPeersMsg) error { }) if len(peers) > 0 { // log.Debug(fmt.Sprintf("%08x: %v peers sent to %v", d.overlay.BaseAddr(), len(peers), d)) - go d.Send(&peersMsg{Peers: peers}) + go d.Send(context.TODO(), &peersMsg{Peers: peers}) } } d.sentPeers = true diff --git a/swarm/network/hive.go b/swarm/network/hive.go index a54a17d29deb..366021088365 100644 --- a/swarm/network/hive.go +++ b/swarm/network/hive.go @@ -102,10 +102,10 @@ func NewHive(params *HiveParams, overlay Overlay, store state.Store) *Hive { // server is used to connect to a peer based on its NodeID or enode URL // these are called on the p2p.Server which runs on the node func (h *Hive) Start(server *p2p.Server) error { - log.Info(fmt.Sprintf("%08x hive starting", h.BaseAddr()[:4])) + log.Info("Starting hive", "baseaddr", fmt.Sprintf("%x", h.BaseAddr()[:4])) // if state store is specified, load peers to prepopulate the overlay address book if h.Store != nil { - log.Info("detected an existing store. trying to load peers") + log.Info("Detected an existing store. trying to load peers") if err := h.loadPeers(); err != nil { log.Error(fmt.Sprintf("%08x hive encoutered an error trying to load peers", h.BaseAddr()[:4])) return err diff --git a/swarm/network/networkid_test.go b/swarm/network/networkid_test.go new file mode 100644 index 000000000000..05134b083b16 --- /dev/null +++ b/swarm/network/networkid_test.go @@ -0,0 +1,266 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package network + +import ( + "bytes" + "context" + "flag" + "fmt" + "math/rand" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" + "github.com/ethereum/go-ethereum/rpc" +) + +var ( + currentNetworkID int + cnt int + nodeMap map[int][]discover.NodeID + kademlias map[discover.NodeID]*Kademlia +) + +const ( + NumberOfNets = 4 + MaxTimeout = 6 +) + +func init() { + flag.Parse() + rand.Seed(time.Now().Unix()) +} + +/* +Run the network ID test. +The test creates one simulations.Network instance, +a number of nodes, then connects nodes with each other in this network. + +Each node gets a network ID assigned according to the number of networks. +Having more network IDs is just arbitrary in order to exclude +false positives. + +Nodes should only connect with other nodes with the same network ID. +After the setup phase, the test checks on each node if it has the +expected node connections (excluding those not sharing the network ID). +*/ +func TestNetworkID(t *testing.T) { + log.Debug("Start test") + //arbitrarily set the number of nodes. It could be any number + numNodes := 24 + //the nodeMap maps all nodes (slice value) with the same network ID (key) + nodeMap = make(map[int][]discover.NodeID) + //set up the network and connect nodes + net, err := setupNetwork(numNodes) + if err != nil { + t.Fatalf("Error setting up network: %v", err) + } + defer func() { + //shutdown the snapshot network + log.Trace("Shutting down network") + net.Shutdown() + }() + //let's sleep to ensure all nodes are connected + time.Sleep(1 * time.Second) + //for each group sharing the same network ID... + for _, netIDGroup := range nodeMap { + log.Trace("netIDGroup size", "size", len(netIDGroup)) + //...check that their size of the kademlia is of the expected size + //the assumption is that it should be the size of the group minus 1 (the node itself) + for _, node := range netIDGroup { + if kademlias[node].addrs.Size() != len(netIDGroup)-1 { + t.Fatalf("Kademlia size has not expected peer size. Kademlia size: %d, expected size: %d", kademlias[node].addrs.Size(), len(netIDGroup)-1) + } + kademlias[node].EachAddr(nil, 0, func(addr OverlayAddr, _ int, _ bool) bool { + found := false + for _, nd := range netIDGroup { + p := ToOverlayAddr(nd.Bytes()) + if bytes.Equal(p, addr.Address()) { + found = true + } + } + if !found { + t.Fatalf("Expected node not found for node %s", node.String()) + } + return true + }) + } + } + log.Info("Test terminated successfully") +} + +// setup simulated network with bzz/discovery and pss services. 
+// connects nodes in a circle +// if allowRaw is set, omission of builtin pss encryption is enabled (see PssParams) +func setupNetwork(numnodes int) (net *simulations.Network, err error) { + log.Debug("Setting up network") + quitC := make(chan struct{}) + errc := make(chan error) + nodes := make([]*simulations.Node, numnodes) + if numnodes < 16 { + return nil, fmt.Errorf("Minimum sixteen nodes in network") + } + adapter := adapters.NewSimAdapter(newServices()) + //create the network + net = simulations.NewNetwork(adapter, &simulations.NetworkConfig{ + ID: "NetworkIdTestNet", + DefaultService: "bzz", + }) + log.Debug("Creating networks and nodes") + + var connCount int + + //create nodes and connect them to each other + for i := 0; i < numnodes; i++ { + log.Trace("iteration: ", "i", i) + nodeconf := adapters.RandomNodeConfig() + nodes[i], err = net.NewNodeWithConfig(nodeconf) + if err != nil { + return nil, fmt.Errorf("error creating node %d: %v", i, err) + } + err = net.Start(nodes[i].ID()) + if err != nil { + return nil, fmt.Errorf("error starting node %d: %v", i, err) + } + client, err := nodes[i].Client() + if err != nil { + return nil, fmt.Errorf("create node %d rpc client fail: %v", i, err) + } + //now setup and start event watching in order to know when we can upload + ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second) + defer watchCancel() + watchSubscriptionEvents(ctx, nodes[i].ID(), client, errc, quitC) + //on every iteration we connect to all previous ones + for k := i - 1; k >= 0; k-- { + connCount++ + log.Debug(fmt.Sprintf("Connecting node %d with node %d; connection count is %d", i, k, connCount)) + err = net.Connect(nodes[i].ID(), nodes[k].ID()) + if err != nil { + if !strings.Contains(err.Error(), "already connected") { + return nil, fmt.Errorf("error connecting nodes: %v", err) + } + } + } + } + //now wait until the number of expected subscriptions has been finished + //`watchSubscriptionEvents` will write with a `nil` value to errc + for err := range errc { + if err != nil { + return nil, err + } + //`nil` received, decrement count + connCount-- + log.Trace("count down", "cnt", connCount) + //all subscriptions received + if connCount == 0 { + close(quitC) + break + } + } + log.Debug("Network setup phase terminated") + return net, nil +} + +func newServices() adapters.Services { + kademlias = make(map[discover.NodeID]*Kademlia) + kademlia := func(id discover.NodeID) *Kademlia { + if k, ok := kademlias[id]; ok { + return k + } + addr := NewAddrFromNodeID(id) + params := NewKadParams() + params.MinProxBinSize = 2 + params.MaxBinSize = 3 + params.MinBinSize = 1 + params.MaxRetries = 1000 + params.RetryExponent = 2 + params.RetryInterval = 1000000 + kademlias[id] = NewKademlia(addr.Over(), params) + return kademlias[id] + } + return adapters.Services{ + "bzz": func(ctx *adapters.ServiceContext) (node.Service, error) { + addr := NewAddrFromNodeID(ctx.Config.ID) + hp := NewHiveParams() + hp.Discovery = false + cnt++ + //assign the network ID + currentNetworkID = cnt % NumberOfNets + if ok := nodeMap[currentNetworkID]; ok == nil { + nodeMap[currentNetworkID] = make([]discover.NodeID, 0) + } + //add this node to the group sharing the same network ID + nodeMap[currentNetworkID] = append(nodeMap[currentNetworkID], ctx.Config.ID) + log.Debug("current network ID:", "id", currentNetworkID) + config := &BzzConfig{ + OverlayAddr: addr.Over(), + UnderlayAddr: addr.Under(), + HiveParams: hp, + NetworkID: uint64(currentNetworkID), + } + return 
NewBzz(config, kademlia(ctx.Config.ID), nil, nil, nil), nil + }, + } +} + +func watchSubscriptionEvents(ctx context.Context, id discover.NodeID, client *rpc.Client, errc chan error, quitC chan struct{}) { + events := make(chan *p2p.PeerEvent) + sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents") + if err != nil { + log.Error(err.Error()) + errc <- fmt.Errorf("error getting peer events for node %v: %s", id, err) + return + } + go func() { + defer func() { + sub.Unsubscribe() + log.Trace("watch subscription events: unsubscribe", "id", id) + }() + + for { + select { + case <-quitC: + return + case <-ctx.Done(): + select { + case errc <- ctx.Err(): + case <-quitC: + } + return + case e := <-events: + if e.Type == p2p.PeerEventTypeAdd { + errc <- nil + } + case err := <-sub.Err(): + if err != nil { + select { + case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err): + case <-quitC: + } + return + } + } + } + }() +} diff --git a/swarm/network/protocol.go b/swarm/network/protocol.go index 39673f5a1d2e..7f7ca5eed579 100644 --- a/swarm/network/protocol.go +++ b/swarm/network/protocol.go @@ -44,7 +44,7 @@ const ( // BzzSpec is the spec of the generic swarm handshake var BzzSpec = &protocols.Spec{ Name: "bzz", - Version: 4, + Version: 6, MaxMsgSize: 10 * 1024 * 1024, Messages: []interface{}{ HandshakeMsg{}, @@ -54,7 +54,7 @@ var BzzSpec = &protocols.Spec{ // DiscoverySpec is the spec for the bzz discovery subprotocols var DiscoverySpec = &protocols.Spec{ Name: "hive", - Version: 4, + Version: 5, MaxMsgSize: 10 * 1024 * 1024, Messages: []interface{}{ peersMsg{}, @@ -82,9 +82,9 @@ type Peer interface { type Conn interface { ID() discover.NodeID // the key that uniquely identifies the Node for the peerPool Handshake(context.Context, interface{}, func(interface{}) error) (interface{}, error) // can send messages - Send(interface{}) error // can send messages + Send(context.Context, interface{}) error // can send messages Drop(error) // disconnect this peer - Run(func(interface{}) error) error // the run function to run a protocol + Run(func(context.Context, interface{}) error) error // the run function to run a protocol Off() OverlayAddr } @@ -94,12 +94,14 @@ type BzzConfig struct { UnderlayAddr []byte // node's underlay address HiveParams *HiveParams NetworkID uint64 + LightNode bool } // Bzz is the swarm protocol bundle type Bzz struct { *Hive NetworkID uint64 + LightNode bool localAddr *BzzAddr mtx sync.Mutex handshakes map[discover.NodeID]*HandshakeMsg @@ -116,6 +118,7 @@ func NewBzz(config *BzzConfig, kad Overlay, store state.Store, streamerSpec *pro return &Bzz{ Hive: NewHive(config.HiveParams, kad, store), NetworkID: config.NetworkID, + LightNode: config.LightNode, localAddr: &BzzAddr{config.OverlayAddr, config.UnderlayAddr}, handshakes: make(map[discover.NodeID]*HandshakeMsg), streamerRun: streamerRun, @@ -209,7 +212,11 @@ func (b *Bzz) RunProtocol(spec *protocols.Spec, run func(*BzzPeer) error) func(* localAddr: b.localAddr, BzzAddr: handshake.peerAddr, lastActive: time.Now(), + LightNode: handshake.LightNode, } + + log.Debug("peer created", "addr", handshake.peerAddr.String()) + return run(peer) } } @@ -228,6 +235,7 @@ func (b *Bzz) performHandshake(p *protocols.Peer, handshake *HandshakeMsg) error return err } handshake.peerAddr = rsh.(*HandshakeMsg).Addr + handshake.LightNode = rsh.(*HandshakeMsg).LightNode return nil } @@ -263,6 +271,7 @@ type BzzPeer struct { localAddr *BzzAddr // local Peers address *BzzAddr // remote address -> 
implements Addr interface = protocols.Peer lastActive time.Time // time is updated whenever mutexes are releasing + LightNode bool } func NewBzzTestPeer(p *protocols.Peer, addr *BzzAddr) *BzzPeer { @@ -294,6 +303,7 @@ type HandshakeMsg struct { Version uint64 NetworkID uint64 Addr *BzzAddr + LightNode bool // peerAddr is the address received in the peer handshake peerAddr *BzzAddr @@ -305,7 +315,7 @@ type HandshakeMsg struct { // String pretty prints the handshake func (bh *HandshakeMsg) String() string { - return fmt.Sprintf("Handshake: Version: %v, NetworkID: %v, Addr: %v", bh.Version, bh.NetworkID, bh.Addr) + return fmt.Sprintf("Handshake: Version: %v, NetworkID: %v, Addr: %v, LightNode: %v, peerAddr: %v", bh.Version, bh.NetworkID, bh.Addr, bh.LightNode, bh.peerAddr) } // Perform initiates the handshake and validates the remote handshake message @@ -338,6 +348,7 @@ func (b *Bzz) GetHandshake(peerID discover.NodeID) (*HandshakeMsg, bool) { Version: uint64(BzzSpec.Version), NetworkID: b.NetworkID, Addr: b.localAddr, + LightNode: b.LightNode, init: make(chan bool, 1), done: make(chan struct{}), } diff --git a/swarm/network/protocol_test.go b/swarm/network/protocol_test.go index c28bc863e362..c052c536a4f3 100644 --- a/swarm/network/protocol_test.go +++ b/swarm/network/protocol_test.go @@ -30,6 +30,11 @@ import ( p2ptest "github.com/ethereum/go-ethereum/p2p/testing" ) +const ( + TestProtocolVersion = 6 + TestProtocolNetworkID = 3 +) + var ( loglevel = flag.Int("loglevel", 2, "verbosity of logs") ) @@ -127,23 +132,30 @@ type bzzTester struct { *p2ptest.ProtocolTester addr *BzzAddr cs map[string]chan bool + bzz *Bzz } -func newBzzHandshakeTester(t *testing.T, n int, addr *BzzAddr) *bzzTester { +func newBzz(addr *BzzAddr, lightNode bool) *Bzz { config := &BzzConfig{ OverlayAddr: addr.Over(), UnderlayAddr: addr.Under(), HiveParams: NewHiveParams(), NetworkID: DefaultNetworkID, + LightNode: lightNode, } kad := NewKademlia(addr.OAddr, NewKadParams()) bzz := NewBzz(config, kad, nil, nil, nil) + return bzz +} - s := p2ptest.NewProtocolTester(t, NewNodeIDFromAddr(addr), 1, bzz.runBzz) +func newBzzHandshakeTester(t *testing.T, n int, addr *BzzAddr, lightNode bool) *bzzTester { + bzz := newBzz(addr, lightNode) + pt := p2ptest.NewProtocolTester(t, NewNodeIDFromAddr(addr), n, bzz.runBzz) return &bzzTester{ addr: addr, - ProtocolTester: s, + ProtocolTester: pt, + bzz: bzz, } } @@ -184,22 +196,24 @@ func (s *bzzTester) testHandshake(lhs, rhs *HandshakeMsg, disconnects ...*p2ptes return nil } -func correctBzzHandshake(addr *BzzAddr) *HandshakeMsg { +func correctBzzHandshake(addr *BzzAddr, lightNode bool) *HandshakeMsg { return &HandshakeMsg{ - Version: 4, - NetworkID: DefaultNetworkID, + Version: TestProtocolVersion, + NetworkID: TestProtocolNetworkID, Addr: addr, + LightNode: lightNode, } } func TestBzzHandshakeNetworkIDMismatch(t *testing.T) { + lightNode := false addr := RandomAddr() - s := newBzzHandshakeTester(t, 1, addr) + s := newBzzHandshakeTester(t, 1, addr, lightNode) id := s.IDs[0] err := s.testHandshake( - correctBzzHandshake(addr), - &HandshakeMsg{Version: 4, NetworkID: 321, Addr: NewAddrFromNodeID(id)}, + correctBzzHandshake(addr, lightNode), + &HandshakeMsg{Version: TestProtocolVersion, NetworkID: 321, Addr: NewAddrFromNodeID(id)}, &p2ptest.Disconnect{Peer: id, Error: fmt.Errorf("Handshake error: Message handler error: (msg code 0): network id mismatch 321 (!= 3)")}, ) @@ -209,14 +223,15 @@ func TestBzzHandshakeNetworkIDMismatch(t *testing.T) { } func TestBzzHandshakeVersionMismatch(t 
*testing.T) { + lightNode := false addr := RandomAddr() - s := newBzzHandshakeTester(t, 1, addr) + s := newBzzHandshakeTester(t, 1, addr, lightNode) id := s.IDs[0] err := s.testHandshake( - correctBzzHandshake(addr), - &HandshakeMsg{Version: 0, NetworkID: 3, Addr: NewAddrFromNodeID(id)}, - &p2ptest.Disconnect{Peer: id, Error: fmt.Errorf("Handshake error: Message handler error: (msg code 0): version mismatch 0 (!= 4)")}, + correctBzzHandshake(addr, lightNode), + &HandshakeMsg{Version: 0, NetworkID: TestProtocolNetworkID, Addr: NewAddrFromNodeID(id)}, + &p2ptest.Disconnect{Peer: id, Error: fmt.Errorf("Handshake error: Message handler error: (msg code 0): version mismatch 0 (!= %d)", TestProtocolVersion)}, ) if err != nil { @@ -225,16 +240,49 @@ func TestBzzHandshakeVersionMismatch(t *testing.T) { } func TestBzzHandshakeSuccess(t *testing.T) { + lightNode := false addr := RandomAddr() - s := newBzzHandshakeTester(t, 1, addr) + s := newBzzHandshakeTester(t, 1, addr, lightNode) id := s.IDs[0] err := s.testHandshake( - correctBzzHandshake(addr), - &HandshakeMsg{Version: 4, NetworkID: 3, Addr: NewAddrFromNodeID(id)}, + correctBzzHandshake(addr, lightNode), + &HandshakeMsg{Version: TestProtocolVersion, NetworkID: TestProtocolNetworkID, Addr: NewAddrFromNodeID(id)}, ) if err != nil { t.Fatal(err) } } + +func TestBzzHandshakeLightNode(t *testing.T) { + var lightNodeTests = []struct { + name string + lightNode bool + }{ + {"on", true}, + {"off", false}, + } + + for _, test := range lightNodeTests { + t.Run(test.name, func(t *testing.T) { + randomAddr := RandomAddr() + pt := newBzzHandshakeTester(t, 1, randomAddr, false) + id := pt.IDs[0] + addr := NewAddrFromNodeID(id) + + err := pt.testHandshake( + correctBzzHandshake(randomAddr, false), + &HandshakeMsg{Version: TestProtocolVersion, NetworkID: TestProtocolNetworkID, Addr: addr, LightNode: test.lightNode}, + ) + + if err != nil { + t.Fatal(err) + } + + if pt.bzz.handshakes[id].LightNode != test.lightNode { + t.Fatalf("peer LightNode flag is %v, should be %v", pt.bzz.handshakes[id].LightNode, test.lightNode) + } + }) + } +} diff --git a/swarm/network/simulation/bucket.go b/swarm/network/simulation/bucket.go new file mode 100644 index 000000000000..ddbedb5210c6 --- /dev/null +++ b/swarm/network/simulation/bucket.go @@ -0,0 +1,81 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "github.com/ethereum/go-ethereum/p2p/discover" +) + +// BucketKey is the type that should be used for keys in simulation buckets. +type BucketKey string + +// NodeItem returns an item set in ServiceFunc function for a particualar node. 
+func (s *Simulation) NodeItem(id discover.NodeID, key interface{}) (value interface{}, ok bool) { + s.mu.Lock() + defer s.mu.Unlock() + + if _, ok := s.buckets[id]; !ok { + return nil, false + } + return s.buckets[id].Load(key) +} + +// SetNodeItem sets a new item associated with the node with provided NodeID. +// Buckets should be used to avoid managing separate simulation global state. +func (s *Simulation) SetNodeItem(id discover.NodeID, key interface{}, value interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + + s.buckets[id].Store(key, value) +} + +// NodesItems returns a map of items from all nodes that are all set under the +// same BucketKey. +func (s *Simulation) NodesItems(key interface{}) (values map[discover.NodeID]interface{}) { + s.mu.RLock() + defer s.mu.RUnlock() + + ids := s.NodeIDs() + values = make(map[discover.NodeID]interface{}, len(ids)) + for _, id := range ids { + if _, ok := s.buckets[id]; !ok { + continue + } + if v, ok := s.buckets[id].Load(key); ok { + values[id] = v + } + } + return values +} + +// UpNodesItems returns a map of items with the same BucketKey from all nodes that are up. +func (s *Simulation) UpNodesItems(key interface{}) (values map[discover.NodeID]interface{}) { + s.mu.RLock() + defer s.mu.RUnlock() + + ids := s.UpNodeIDs() + values = make(map[discover.NodeID]interface{}) + for _, id := range ids { + if _, ok := s.buckets[id]; !ok { + continue + } + if v, ok := s.buckets[id].Load(key); ok { + values[id] = v + } + } + return values +} diff --git a/swarm/network/simulation/bucket_test.go b/swarm/network/simulation/bucket_test.go new file mode 100644 index 000000000000..461d99825cb2 --- /dev/null +++ b/swarm/network/simulation/bucket_test.go @@ -0,0 +1,155 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "sync" + "testing" + + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" +) + +// TestServiceBucket tests all bucket functionalities using subtests. +// It constructs a simulation of two nodes by adding items to their buckets +// in ServiceFunc constructor, then by SetNodeItem. Testing UpNodesItems +// is done by stopping one node and validating availability of its items. 
+func TestServiceBucket(t *testing.T) { + testKey := "Key" + testValue := "Value" + + sim := New(map[string]ServiceFunc{ + "noop": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) { + b.Store(testKey, testValue+ctx.Config.ID.String()) + return newNoopService(), nil, nil + }, + }) + defer sim.Close() + + id1, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } + + id2, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } + + t.Run("ServiceFunc bucket Store", func(t *testing.T) { + v, ok := sim.NodeItem(id1, testKey) + if !ok { + t.Fatal("bucket item not found") + } + s, ok := v.(string) + if !ok { + t.Fatal("bucket item value is not string") + } + if s != testValue+id1.String() { + t.Fatalf("expected %q, got %q", testValue+id1.String(), s) + } + + v, ok = sim.NodeItem(id2, testKey) + if !ok { + t.Fatal("bucket item not found") + } + s, ok = v.(string) + if !ok { + t.Fatal("bucket item value is not string") + } + if s != testValue+id2.String() { + t.Fatalf("expected %q, got %q", testValue+id2.String(), s) + } + }) + + customKey := "anotherKey" + customValue := "anotherValue" + + t.Run("SetNodeItem", func(t *testing.T) { + sim.SetNodeItem(id1, customKey, customValue) + + v, ok := sim.NodeItem(id1, customKey) + if !ok { + t.Fatal("bucket item not found") + } + s, ok := v.(string) + if !ok { + t.Fatal("bucket item value is not string") + } + if s != customValue { + t.Fatalf("expected %q, got %q", customValue, s) + } + + v, ok = sim.NodeItem(id2, customKey) + if ok { + t.Fatal("bucket item should not be found") + } + }) + + if err := sim.StopNode(id2); err != nil { + t.Fatal(err) + } + + t.Run("UpNodesItems", func(t *testing.T) { + items := sim.UpNodesItems(testKey) + + v, ok := items[id1] + if !ok { + t.Errorf("node 1 item not found") + } + s, ok := v.(string) + if !ok { + t.Fatal("node 1 item value is not string") + } + if s != testValue+id1.String() { + t.Fatalf("expected %q, got %q", testValue+id1.String(), s) + } + + v, ok = items[id2] + if ok { + t.Errorf("node 2 item should not be found") + } + }) + + t.Run("NodeItems", func(t *testing.T) { + items := sim.NodesItems(testKey) + + v, ok := items[id1] + if !ok { + t.Errorf("node 1 item not found") + } + s, ok := v.(string) + if !ok { + t.Fatal("node 1 item value is not string") + } + if s != testValue+id1.String() { + t.Fatalf("expected %q, got %q", testValue+id1.String(), s) + } + + v, ok = items[id2] + if !ok { + t.Errorf("node 2 item not found") + } + s, ok = v.(string) + if !ok { + t.Fatal("node 1 item value is not string") + } + if s != testValue+id2.String() { + t.Fatalf("expected %q, got %q", testValue+id2.String(), s) + } + }) +} diff --git a/swarm/network/simulation/connect.go b/swarm/network/simulation/connect.go new file mode 100644 index 000000000000..3d0f6cb3fca2 --- /dev/null +++ b/swarm/network/simulation/connect.go @@ -0,0 +1,159 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "strings" + + "github.com/ethereum/go-ethereum/p2p/discover" +) + +// ConnectToPivotNode connects the node with provided NodeID +// to the pivot node, already set by Simulation.SetPivotNode method. +// It is useful when constructing a star network topology +// when simulation adds and removes nodes dynamically. +func (s *Simulation) ConnectToPivotNode(id discover.NodeID) (err error) { + pid := s.PivotNodeID() + if pid == nil { + return ErrNoPivotNode + } + return s.connect(*pid, id) +} + +// ConnectToLastNode connects the node with provided NodeID +// to the last node that is up, and avoiding connection to self. +// It is useful when constructing a chain network topology +// when simulation adds and removes nodes dynamically. +func (s *Simulation) ConnectToLastNode(id discover.NodeID) (err error) { + ids := s.UpNodeIDs() + l := len(ids) + if l < 2 { + return nil + } + lid := ids[l-1] + if lid == id { + lid = ids[l-2] + } + return s.connect(lid, id) +} + +// ConnectToRandomNode connects the node with provieded NodeID +// to a random node that is up. +func (s *Simulation) ConnectToRandomNode(id discover.NodeID) (err error) { + n := s.RandomUpNode(id) + if n == nil { + return ErrNodeNotFound + } + return s.connect(n.ID, id) +} + +// ConnectNodesFull connects all nodes one to another. +// It provides a complete connectivity in the network +// which should be rarely needed. +func (s *Simulation) ConnectNodesFull(ids []discover.NodeID) (err error) { + if ids == nil { + ids = s.UpNodeIDs() + } + l := len(ids) + for i := 0; i < l; i++ { + for j := i + 1; j < l; j++ { + err = s.connect(ids[i], ids[j]) + if err != nil { + return err + } + } + } + return nil +} + +// ConnectNodesChain connects all nodes in a chain topology. +// If ids argument is nil, all nodes that are up will be connected. +func (s *Simulation) ConnectNodesChain(ids []discover.NodeID) (err error) { + if ids == nil { + ids = s.UpNodeIDs() + } + l := len(ids) + for i := 0; i < l-1; i++ { + err = s.connect(ids[i], ids[i+1]) + if err != nil { + return err + } + } + return nil +} + +// ConnectNodesRing connects all nodes in a ring topology. +// If ids argument is nil, all nodes that are up will be connected. +func (s *Simulation) ConnectNodesRing(ids []discover.NodeID) (err error) { + if ids == nil { + ids = s.UpNodeIDs() + } + l := len(ids) + if l < 2 { + return nil + } + for i := 0; i < l-1; i++ { + err = s.connect(ids[i], ids[i+1]) + if err != nil { + return err + } + } + return s.connect(ids[l-1], ids[0]) +} + +// ConnectNodesStar connects all nodes in a star topology +// with the center at provided NodeID. +// If ids argument is nil, all nodes that are up will be connected. +func (s *Simulation) ConnectNodesStar(id discover.NodeID, ids []discover.NodeID) (err error) { + if ids == nil { + ids = s.UpNodeIDs() + } + l := len(ids) + for i := 0; i < l; i++ { + if id == ids[i] { + continue + } + err = s.connect(id, ids[i]) + if err != nil { + return err + } + } + return nil +} + +// ConnectNodesStarPivot connects all nodes in a star topology +// with the center at already set pivot node. +// If ids argument is nil, all nodes that are up will be connected. 
+func (s *Simulation) ConnectNodesStarPivot(ids []discover.NodeID) (err error) { + id := s.PivotNodeID() + if id == nil { + return ErrNoPivotNode + } + return s.ConnectNodesStar(*id, ids) +} + +// connect connects two nodes but ignores already connected error. +func (s *Simulation) connect(oneID, otherID discover.NodeID) error { + return ignoreAlreadyConnectedErr(s.Net.Connect(oneID, otherID)) +} + +func ignoreAlreadyConnectedErr(err error) error { + if err == nil || strings.Contains(err.Error(), "already connected") { + return nil + } + return err +} diff --git a/swarm/network/simulation/connect_test.go b/swarm/network/simulation/connect_test.go new file mode 100644 index 000000000000..10d73e4a1c7f --- /dev/null +++ b/swarm/network/simulation/connect_test.go @@ -0,0 +1,306 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "testing" + + "github.com/ethereum/go-ethereum/p2p/discover" +) + +func TestConnectToPivotNode(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + pid, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } + + sim.SetPivotNode(pid) + + id, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } + + if len(sim.Net.Conns) > 0 { + t.Fatal("no connections should exist after just adding nodes") + } + + err = sim.ConnectToPivotNode(id) + if err != nil { + t.Fatal(err) + } + + if sim.Net.GetConn(id, pid) == nil { + t.Error("node did not connect to pivot node") + } +} + +func TestConnectToLastNode(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + n := 10 + + ids, err := sim.AddNodes(n) + if err != nil { + t.Fatal(err) + } + + id, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } + + if len(sim.Net.Conns) > 0 { + t.Fatal("no connections should exist after just adding nodes") + } + + err = sim.ConnectToLastNode(id) + if err != nil { + t.Fatal(err) + } + + for _, i := range ids[:n-2] { + if sim.Net.GetConn(id, i) != nil { + t.Error("node connected to the node that is not the last") + } + } + + if sim.Net.GetConn(id, ids[n-1]) == nil { + t.Error("node did not connect to the last node") + } +} + +func TestConnectToRandomNode(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + n := 10 + + ids, err := sim.AddNodes(n) + if err != nil { + t.Fatal(err) + } + + if len(sim.Net.Conns) > 0 { + t.Fatal("no connections should exist after just adding nodes") + } + + err = sim.ConnectToRandomNode(ids[0]) + if err != nil { + t.Fatal(err) + } + + var cc int + for i := 0; i < n; i++ { + for j := i + 1; j < n; j++ { + if sim.Net.GetConn(ids[i], ids[j]) != nil { + cc++ + } + } + } + + if cc != 1 { + t.Errorf("expected one connection, got %v", cc) + } +} + +func TestConnectNodesFull(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + ids, err := 
sim.AddNodes(12) + if err != nil { + t.Fatal(err) + } + + if len(sim.Net.Conns) > 0 { + t.Fatal("no connections should exist after just adding nodes") + } + + err = sim.ConnectNodesFull(ids) + if err != nil { + t.Fatal(err) + } + + testFull(t, sim, ids) +} + +func testFull(t *testing.T, sim *Simulation, ids []discover.NodeID) { + n := len(ids) + var cc int + for i := 0; i < n; i++ { + for j := i + 1; j < n; j++ { + if sim.Net.GetConn(ids[i], ids[j]) != nil { + cc++ + } + } + } + + want := n * (n - 1) / 2 + + if cc != want { + t.Errorf("expected %v connection, got %v", want, cc) + } +} + +func TestConnectNodesChain(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + ids, err := sim.AddNodes(10) + if err != nil { + t.Fatal(err) + } + + if len(sim.Net.Conns) > 0 { + t.Fatal("no connections should exist after just adding nodes") + } + + err = sim.ConnectNodesChain(ids) + if err != nil { + t.Fatal(err) + } + + testChain(t, sim, ids) +} + +func testChain(t *testing.T, sim *Simulation, ids []discover.NodeID) { + n := len(ids) + for i := 0; i < n; i++ { + for j := i + 1; j < n; j++ { + c := sim.Net.GetConn(ids[i], ids[j]) + if i == j-1 { + if c == nil { + t.Errorf("nodes %v and %v are not connected, but they should be", i, j) + } + } else { + if c != nil { + t.Errorf("nodes %v and %v are connected, but they should not be", i, j) + } + } + } + } +} + +func TestConnectNodesRing(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + ids, err := sim.AddNodes(10) + if err != nil { + t.Fatal(err) + } + + if len(sim.Net.Conns) > 0 { + t.Fatal("no connections should exist after just adding nodes") + } + + err = sim.ConnectNodesRing(ids) + if err != nil { + t.Fatal(err) + } + + testRing(t, sim, ids) +} + +func testRing(t *testing.T, sim *Simulation, ids []discover.NodeID) { + n := len(ids) + for i := 0; i < n; i++ { + for j := i + 1; j < n; j++ { + c := sim.Net.GetConn(ids[i], ids[j]) + if i == j-1 || (i == 0 && j == n-1) { + if c == nil { + t.Errorf("nodes %v and %v are not connected, but they should be", i, j) + } + } else { + if c != nil { + t.Errorf("nodes %v and %v are connected, but they should not be", i, j) + } + } + } + } +} + +func TestConnectToNodesStar(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + ids, err := sim.AddNodes(10) + if err != nil { + t.Fatal(err) + } + + if len(sim.Net.Conns) > 0 { + t.Fatal("no connections should exist after just adding nodes") + } + + centerIndex := 2 + + err = sim.ConnectNodesStar(ids[centerIndex], ids) + if err != nil { + t.Fatal(err) + } + + testStar(t, sim, ids, centerIndex) +} + +func testStar(t *testing.T, sim *Simulation, ids []discover.NodeID, centerIndex int) { + n := len(ids) + for i := 0; i < n; i++ { + for j := i + 1; j < n; j++ { + c := sim.Net.GetConn(ids[i], ids[j]) + if i == centerIndex || j == centerIndex { + if c == nil { + t.Errorf("nodes %v and %v are not connected, but they should be", i, j) + } + } else { + if c != nil { + t.Errorf("nodes %v and %v are connected, but they should not be", i, j) + } + } + } + } +} + +func TestConnectToNodesStarPivot(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + ids, err := sim.AddNodes(10) + if err != nil { + t.Fatal(err) + } + + if len(sim.Net.Conns) > 0 { + t.Fatal("no connections should exist after just adding nodes") + } + + pivotIndex := 4 + + sim.SetPivotNode(ids[pivotIndex]) + + err = sim.ConnectNodesStarPivot(ids) + if err != nil { + t.Fatal(err) + } + + testStar(t, sim, ids, pivotIndex) +} diff --git 
a/swarm/network/simulation/events.go b/swarm/network/simulation/events.go new file mode 100644 index 000000000000..980a9a7564e9 --- /dev/null +++ b/swarm/network/simulation/events.go @@ -0,0 +1,168 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "context" + "sync" + + "github.com/ethereum/go-ethereum/p2p/discover" + + "github.com/ethereum/go-ethereum/p2p" +) + +// PeerEvent is the type of the channel returned by Simulation.PeerEvents. +type PeerEvent struct { + // NodeID is the ID of node that the event is caught on. + NodeID discover.NodeID + // Event is the event that is caught. + Event *p2p.PeerEvent + // Error is the error that may have happened during event watching. + Error error +} + +// PeerEventsFilter defines a filter on PeerEvents to exclude messages with +// defined properties. Use PeerEventsFilter methods to set required options. +type PeerEventsFilter struct { + t *p2p.PeerEventType + protocol *string + msgCode *uint64 +} + +// NewPeerEventsFilter returns a new PeerEventsFilter instance. +func NewPeerEventsFilter() *PeerEventsFilter { + return &PeerEventsFilter{} +} + +// Type sets the filter to only one peer event type. +func (f *PeerEventsFilter) Type(t p2p.PeerEventType) *PeerEventsFilter { + f.t = &t + return f +} + +// Protocol sets the filter to only one message protocol. +func (f *PeerEventsFilter) Protocol(p string) *PeerEventsFilter { + f.protocol = &p + return f +} + +// MsgCode sets the filter to only one msg code. +func (f *PeerEventsFilter) MsgCode(c uint64) *PeerEventsFilter { + f.msgCode = &c + return f +} + +// PeerEvents returns a channel of events that are captured by admin peerEvents +// subscription nodes with provided NodeIDs. Additional filters can be set to ignore +// events that are not relevant. +func (s *Simulation) PeerEvents(ctx context.Context, ids []discover.NodeID, filters ...*PeerEventsFilter) <-chan PeerEvent { + eventC := make(chan PeerEvent) + + // wait group to make sure all subscriptions to admin peerEvents are established + // before this function returns. 
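+	// Each subscription goroutine below calls subsWG.Done() exactly once:
+	// either on an early error return, or right after a successful
+	// Subscribe call, so the Wait at the end of this function cannot
+	// block forever.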
+ var subsWG sync.WaitGroup + for _, id := range ids { + s.shutdownWG.Add(1) + subsWG.Add(1) + go func(id discover.NodeID) { + defer s.shutdownWG.Done() + + client, err := s.Net.GetNode(id).Client() + if err != nil { + subsWG.Done() + eventC <- PeerEvent{NodeID: id, Error: err} + return + } + events := make(chan *p2p.PeerEvent) + sub, err := client.Subscribe(ctx, "admin", events, "peerEvents") + if err != nil { + subsWG.Done() + eventC <- PeerEvent{NodeID: id, Error: err} + return + } + defer sub.Unsubscribe() + + subsWG.Done() + + for { + select { + case <-ctx.Done(): + if err := ctx.Err(); err != nil { + select { + case eventC <- PeerEvent{NodeID: id, Error: err}: + case <-s.Done(): + } + } + return + case <-s.Done(): + return + case e := <-events: + match := len(filters) == 0 // if there are no filters match all events + for _, f := range filters { + if f.t != nil && *f.t != e.Type { + continue + } + if f.protocol != nil && *f.protocol != e.Protocol { + continue + } + if f.msgCode != nil && e.MsgCode != nil && *f.msgCode != *e.MsgCode { + continue + } + // all filter parameters matched, break the loop + match = true + break + } + if match { + select { + case eventC <- PeerEvent{NodeID: id, Event: e}: + case <-ctx.Done(): + if err := ctx.Err(); err != nil { + select { + case eventC <- PeerEvent{NodeID: id, Error: err}: + case <-s.Done(): + } + } + return + case <-s.Done(): + return + } + } + case err := <-sub.Err(): + if err != nil { + select { + case eventC <- PeerEvent{NodeID: id, Error: err}: + case <-ctx.Done(): + if err := ctx.Err(); err != nil { + select { + case eventC <- PeerEvent{NodeID: id, Error: err}: + case <-s.Done(): + } + } + return + case <-s.Done(): + return + } + } + } + } + }(id) + } + + // wait all subscriptions + subsWG.Wait() + return eventC +} diff --git a/swarm/network/simulation/events_test.go b/swarm/network/simulation/events_test.go new file mode 100644 index 000000000000..0c185d977453 --- /dev/null +++ b/swarm/network/simulation/events_test.go @@ -0,0 +1,104 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "context" + "sync" + "testing" + "time" +) + +// TestPeerEvents creates simulation, adds two nodes, +// register for peer events, connects nodes in a chain +// and waits for the number of connection events to +// be received. 
+func TestPeerEvents(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + _, err := sim.AddNodes(2) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + events := sim.PeerEvents(ctx, sim.NodeIDs()) + + // two nodes -> two connection events + expectedEventCount := 2 + + var wg sync.WaitGroup + wg.Add(expectedEventCount) + + go func() { + for e := range events { + if e.Error != nil { + if e.Error == context.Canceled { + return + } + t.Error(e.Error) + continue + } + wg.Done() + } + }() + + err = sim.ConnectNodesChain(sim.NodeIDs()) + if err != nil { + t.Fatal(err) + } + + wg.Wait() +} + +func TestPeerEventsTimeout(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + _, err := sim.AddNodes(2) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + events := sim.PeerEvents(ctx, sim.NodeIDs()) + + done := make(chan struct{}) + go func() { + for e := range events { + if e.Error == context.Canceled { + return + } + if e.Error == context.DeadlineExceeded { + close(done) + return + } else { + t.Fatal(e.Error) + } + } + }() + + select { + case <-time.After(time.Second): + t.Error("no context deadline received") + case <-done: + // all good, context deadline detected + } +} diff --git a/swarm/network/simulation/example_test.go b/swarm/network/simulation/example_test.go new file mode 100644 index 000000000000..2a8116921aa0 --- /dev/null +++ b/swarm/network/simulation/example_test.go @@ -0,0 +1,140 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation_test + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" + "github.com/ethereum/go-ethereum/swarm/network" + "github.com/ethereum/go-ethereum/swarm/network/simulation" +) + +// Every node can have a Kademlia associated using the node bucket under +// BucketKeyKademlia key. This allows to use WaitTillHealthy to block until +// all nodes have the their Kadmlias healthy. 
+func ExampleSimulation_WaitTillHealthy() {
+	sim := simulation.New(map[string]simulation.ServiceFunc{
+		"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+			addr := network.NewAddrFromNodeID(ctx.Config.ID)
+			hp := network.NewHiveParams()
+			hp.Discovery = false
+			config := &network.BzzConfig{
+				OverlayAddr:  addr.Over(),
+				UnderlayAddr: addr.Under(),
+				HiveParams:   hp,
+			}
+			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+			// store kademlia in node's bucket under BucketKeyKademlia
+			// so that it can be found by WaitTillHealthy method.
+			b.Store(simulation.BucketKeyKademlia, kad)
+			return network.NewBzz(config, kad, nil, nil, nil), nil, nil
+		},
+	})
+	defer sim.Close()
+
+	_, err := sim.AddNodesAndConnectRing(10)
+	if err != nil {
+		// handle error properly...
+		panic(err)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	defer cancel()
+	ill, err := sim.WaitTillHealthy(ctx, 2)
+	if err != nil {
+		// inspect the latest detected not healthy kademlias
+		for id, kad := range ill {
+			fmt.Println("Node", id)
+			fmt.Println(kad.String())
+		}
+		// handle error...
+	}
+
+	// continue with the test
+}
+
+// Watch all peer events in the simulation network, by receiving from a channel.
+func ExampleSimulation_PeerEvents() {
+	sim := simulation.New(nil)
+	defer sim.Close()
+
+	events := sim.PeerEvents(context.Background(), sim.NodeIDs())
+
+	go func() {
+		for e := range events {
+			if e.Error != nil {
+				log.Error("peer event", "err", e.Error)
+				continue
+			}
+			log.Info("peer event", "node", e.NodeID, "peer", e.Event.Peer, "msgcode", e.Event.MsgCode)
+		}
+	}()
+}
+
+// Detect when a node drops a peer.
+func ExampleSimulation_PeerEvents_disconnections() {
+	sim := simulation.New(nil)
+	defer sim.Close()
+
+	disconnections := sim.PeerEvents(
+		context.Background(),
+		sim.NodeIDs(),
+		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+	)
+
+	go func() {
+		for d := range disconnections {
+			if d.Error != nil {
+				log.Error("peer drop", "err", d.Error)
+				continue
+			}
+			log.Warn("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+		}
+	}()
+}
+
+// Watch multiple types of events or messages. In this case, they differ only
+// by MsgCode, but filters can be set for different types or protocols, too.
+func ExampleSimulation_PeerEvents_multipleFilters() {
+	sim := simulation.New(nil)
+	defer sim.Close()
+
+	msgs := sim.PeerEvents(
+		context.Background(),
+		sim.NodeIDs(),
+		// Watch when bzz messages 1 and 4 are received.
+		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("bzz").MsgCode(1),
+		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("bzz").MsgCode(4),
+	)
+
+	go func() {
+		for m := range msgs {
+			if m.Error != nil {
+				log.Error("bzz message", "err", m.Error)
+				continue
+			}
+			log.Info("bzz message", "node", m.NodeID, "peer", m.Event.Peer)
+		}
+	}()
+}
diff --git a/swarm/network/simulation/http.go b/swarm/network/simulation/http.go
new file mode 100644
index 000000000000..69ae3baec289
--- /dev/null
+++ b/swarm/network/simulation/http.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "fmt" + "net/http" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/simulations" +) + +// Package defaults. +var ( + DefaultHTTPSimAddr = ":8888" +) + +//WithServer implements the builder pattern constructor for Simulation to +//start with a HTTP server +func (s *Simulation) WithServer(addr string) *Simulation { + //assign default addr if nothing provided + if addr == "" { + addr = DefaultHTTPSimAddr + } + log.Info(fmt.Sprintf("Initializing simulation server on %s...", addr)) + //initialize the HTTP server + s.handler = simulations.NewServer(s.Net) + s.runC = make(chan struct{}) + //add swarm specific routes to the HTTP server + s.addSimulationRoutes() + s.httpSrv = &http.Server{ + Addr: addr, + Handler: s.handler, + } + go func() { + err := s.httpSrv.ListenAndServe() + if err != nil { + log.Error("Error starting the HTTP server", "error", err) + } + }() + return s +} + +//register additional HTTP routes +func (s *Simulation) addSimulationRoutes() { + s.handler.POST("/runsim", s.RunSimulation) +} + +// RunSimulation is the actual POST endpoint runner +func (s *Simulation) RunSimulation(w http.ResponseWriter, req *http.Request) { + log.Debug("RunSimulation endpoint running") + s.runC <- struct{}{} + w.WriteHeader(http.StatusOK) +} diff --git a/swarm/network/simulation/http_test.go b/swarm/network/simulation/http_test.go new file mode 100644 index 000000000000..775cf9219046 --- /dev/null +++ b/swarm/network/simulation/http_test.go @@ -0,0 +1,109 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
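+
+// The test below exercises the WithServer builder from http.go. A minimal
+// sketch of the intended flow (illustrative only, assuming a services map,
+// a context ctx, a RunFunc runFunc and the default ":8888" address):
+//
+//	sim := New(services).WithServer(DefaultHTTPSimAddr)
+//	defer sim.Close()
+//	// sim.Run blocks until an external caller signals readiness with
+//	// POST http://localhost:8888/runsim, then executes the RunFunc.
+//	result := sim.Run(ctx, runFunc)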
+ +package simulation + +import ( + "context" + "fmt" + "net/http" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" +) + +func TestSimulationWithHTTPServer(t *testing.T) { + log.Debug("Init simulation") + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + sim := New( + map[string]ServiceFunc{ + "noop": func(_ *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) { + return newNoopService(), nil, nil + }, + }).WithServer(DefaultHTTPSimAddr) + defer sim.Close() + log.Debug("Done.") + + _, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } + + log.Debug("Starting sim round and let it time out...") + //first test that running without sending to the channel will actually + //block the simulation, so let it time out + result := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error { + log.Debug("Just start the sim without any action and wait for the timeout") + //ensure with a Sleep that simulation doesn't terminate before the timeout + time.Sleep(2 * time.Second) + return nil + }) + + if result.Error != nil { + if result.Error.Error() == "context deadline exceeded" { + log.Debug("Expected timeout error received") + } else { + t.Fatal(result.Error) + } + } + + //now run it again and send the expected signal on the waiting channel, + //then close the simulation + log.Debug("Starting sim round and wait for frontend signal...") + //this time the timeout should be long enough so that it doesn't kick in too early + ctx, cancel2 := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel2() + go sendRunSignal(t) + result = sim.Run(ctx, func(ctx context.Context, sim *Simulation) error { + log.Debug("This run waits for the run signal from `frontend`...") + //ensure with a Sleep that simulation doesn't terminate before the signal is received + time.Sleep(2 * time.Second) + return nil + }) + if result.Error != nil { + t.Fatal(result.Error) + } + log.Debug("Test terminated successfully") +} + +func sendRunSignal(t *testing.T) { + //We need to first wait for the sim HTTP server to start running... + time.Sleep(2 * time.Second) + //then we can send the signal + + log.Debug("Sending run signal to simulation: POST /runsim...") + resp, err := http.Post(fmt.Sprintf("http://localhost%s/runsim", DefaultHTTPSimAddr), "application/json", nil) + if err != nil { + t.Fatalf("Request failed: %v", err) + } + defer func() { + err := resp.Body.Close() + if err != nil { + log.Error("Error closing response body", "err", err) + } + }() + log.Debug("Signal sent") + if resp.StatusCode != http.StatusOK { + t.Fatalf("err %s", resp.Status) + } +} diff --git a/swarm/network/simulation/kademlia.go b/swarm/network/simulation/kademlia.go new file mode 100644 index 000000000000..3e45cb0ce155 --- /dev/null +++ b/swarm/network/simulation/kademlia.go @@ -0,0 +1,96 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "context" + "encoding/hex" + "time" + + "github.com/ethereum/go-ethereum/p2p/discover" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/network" +) + +// BucketKeyKademlia is the key to be used for storing the kademlia +// instance for particuar node, usually inside the ServiceFunc function. +var BucketKeyKademlia BucketKey = "kademlia" + +// WaitTillHealthy is blocking until the health of all kademlias is true. +// If error is not nil, a map of kademlia that was found not healthy is returned. +func (s *Simulation) WaitTillHealthy(ctx context.Context, kadMinProxSize int) (ill map[discover.NodeID]*network.Kademlia, err error) { + // Prepare PeerPot map for checking Kademlia health + var ppmap map[string]*network.PeerPot + kademlias := s.kademlias() + addrs := make([][]byte, 0, len(kademlias)) + for _, k := range kademlias { + addrs = append(addrs, k.BaseAddr()) + } + ppmap = network.NewPeerPotMap(kadMinProxSize, addrs) + + // Wait for healthy Kademlia on every node before checking files + ticker := time.NewTicker(200 * time.Millisecond) + defer ticker.Stop() + + ill = make(map[discover.NodeID]*network.Kademlia) + for { + select { + case <-ctx.Done(): + return ill, ctx.Err() + case <-ticker.C: + for k := range ill { + delete(ill, k) + } + log.Debug("kademlia health check", "addr count", len(addrs)) + for id, k := range kademlias { + //PeerPot for this node + addr := common.Bytes2Hex(k.BaseAddr()) + pp := ppmap[addr] + //call Healthy RPC + h := k.Healthy(pp) + //print info + log.Debug(k.String()) + log.Debug("kademlia", "empty bins", pp.EmptyBins, "gotNN", h.GotNN, "knowNN", h.KnowNN, "full", h.Full) + log.Debug("kademlia", "health", h.GotNN && h.KnowNN && h.Full, "addr", hex.EncodeToString(k.BaseAddr()), "node", id) + log.Debug("kademlia", "ill condition", !h.GotNN || !h.Full, "addr", hex.EncodeToString(k.BaseAddr()), "node", id) + if !h.GotNN || !h.Full { + ill[id] = k + } + } + if len(ill) == 0 { + return nil, nil + } + } + } +} + +// kademlias returns all Kademlia instances that are set +// in simulation bucket. +func (s *Simulation) kademlias() (ks map[discover.NodeID]*network.Kademlia) { + items := s.UpNodesItems(BucketKeyKademlia) + ks = make(map[discover.NodeID]*network.Kademlia, len(items)) + for id, v := range items { + k, ok := v.(*network.Kademlia) + if !ok { + continue + } + ks[id] = k + } + return ks +} diff --git a/swarm/network/simulation/kademlia_test.go b/swarm/network/simulation/kademlia_test.go new file mode 100644 index 000000000000..d11fe7e41f42 --- /dev/null +++ b/swarm/network/simulation/kademlia_test.go @@ -0,0 +1,67 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" + "github.com/ethereum/go-ethereum/swarm/network" +) + +func TestWaitTillHealthy(t *testing.T) { + sim := New(map[string]ServiceFunc{ + "bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) { + addr := network.NewAddrFromNodeID(ctx.Config.ID) + hp := network.NewHiveParams() + hp.Discovery = false + config := &network.BzzConfig{ + OverlayAddr: addr.Over(), + UnderlayAddr: addr.Under(), + HiveParams: hp, + } + kad := network.NewKademlia(addr.Over(), network.NewKadParams()) + // store kademlia in node's bucket under BucketKeyKademlia + // so that it can be found by WaitTillHealthy method. + b.Store(BucketKeyKademlia, kad) + return network.NewBzz(config, kad, nil, nil, nil), nil, nil + }, + }) + defer sim.Close() + + _, err := sim.AddNodesAndConnectRing(10) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel() + ill, err := sim.WaitTillHealthy(ctx, 2) + if err != nil { + for id, kad := range ill { + t.Log("Node", id) + t.Log(kad.String()) + } + if err != nil { + t.Fatal(err) + } + } +} diff --git a/swarm/network/simulation/node.go b/swarm/network/simulation/node.go new file mode 100644 index 000000000000..784588fa6e27 --- /dev/null +++ b/swarm/network/simulation/node.go @@ -0,0 +1,362 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "encoding/json" + "errors" + "io/ioutil" + "math/rand" + "os" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" +) + +// NodeIDs returns NodeIDs for all nodes in the network. +func (s *Simulation) NodeIDs() (ids []discover.NodeID) { + nodes := s.Net.GetNodes() + ids = make([]discover.NodeID, len(nodes)) + for i, node := range nodes { + ids[i] = node.ID() + } + return ids +} + +// UpNodeIDs returns NodeIDs for nodes that are up in the network. 
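+//
+// A minimal usage sketch (illustrative only, assuming a Simulation value
+// named sim and a context ctx): the returned IDs can be fed directly into
+// other helpers, for example:
+//
+//	ids := sim.UpNodeIDs()
+//	events := sim.PeerEvents(ctx, ids)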
+func (s *Simulation) UpNodeIDs() (ids []discover.NodeID) {
+	nodes := s.Net.GetNodes()
+	for _, node := range nodes {
+		if node.Up {
+			ids = append(ids, node.ID())
+		}
+	}
+	return ids
+}
+
+// DownNodeIDs returns NodeIDs for nodes that are stopped in the network.
+func (s *Simulation) DownNodeIDs() (ids []discover.NodeID) {
+	nodes := s.Net.GetNodes()
+	for _, node := range nodes {
+		if !node.Up {
+			ids = append(ids, node.ID())
+		}
+	}
+	return ids
+}
+
+// AddNodeOption defines an option that can be passed
+// to the Simulation.AddNode method.
+type AddNodeOption func(*adapters.NodeConfig)
+
+// AddNodeWithMsgEvents sets the EnableMsgEvents option
+// to NodeConfig.
+func AddNodeWithMsgEvents(enable bool) AddNodeOption {
+	return func(o *adapters.NodeConfig) {
+		o.EnableMsgEvents = enable
+	}
+}
+
+// AddNodeWithService specifies a service that should be
+// started on a node. This option can be repeated as variadic
+// argument to AddNode and other node-adding methods.
+// If AddNodeWithService is not specified, all services will be started.
+func AddNodeWithService(serviceName string) AddNodeOption {
+	return func(o *adapters.NodeConfig) {
+		o.Services = append(o.Services, serviceName)
+	}
+}
+
+// AddNode creates a new node with random configuration,
+// applies provided options to the config and adds the node to the network.
+// By default all services will be started on a node. If one or more
+// AddNodeWithService options are provided, only the specified services will be started.
+func (s *Simulation) AddNode(opts ...AddNodeOption) (id discover.NodeID, err error) {
+	conf := adapters.RandomNodeConfig()
+	for _, o := range opts {
+		o(conf)
+	}
+	if len(conf.Services) == 0 {
+		conf.Services = s.serviceNames
+	}
+	node, err := s.Net.NewNodeWithConfig(conf)
+	if err != nil {
+		return id, err
+	}
+	return node.ID(), s.Net.Start(node.ID())
+}
+
+// AddNodes creates new nodes with random configurations,
+// applies provided options to the config and adds the nodes to the network.
+func (s *Simulation) AddNodes(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
+	ids = make([]discover.NodeID, 0, count)
+	for i := 0; i < count; i++ {
+		id, err := s.AddNode(opts...)
+		if err != nil {
+			return nil, err
+		}
+		ids = append(ids, id)
+	}
+	return ids, nil
+}
+
+// AddNodesAndConnectFull is a helper method that combines
+// AddNodes and ConnectNodesFull. Only new nodes will be connected.
+func (s *Simulation) AddNodesAndConnectFull(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
+	if count < 2 {
+		return nil, errors.New("count of nodes must be at least 2")
+	}
+	ids, err = s.AddNodes(count, opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = s.ConnectNodesFull(ids)
+	if err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// AddNodesAndConnectChain is a helper method that combines
+// AddNodes and ConnectNodesChain. The chain will be continued from the last
+// added node, if there is one in the simulation, using the ConnectToLastNode method.
+func (s *Simulation) AddNodesAndConnectChain(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
+	if count < 2 {
+		return nil, errors.New("count of nodes must be at least 2")
+	}
+	id, err := s.AddNode(opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = s.ConnectToLastNode(id)
+	if err != nil {
+		return nil, err
+	}
+	ids, err = s.AddNodes(count-1, opts...)
+	if err != nil {
+		return nil, err
+	}
+	ids = append([]discover.NodeID{id}, ids...)
+	err = s.ConnectNodesChain(ids)
+	if err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// AddNodesAndConnectRing is a helper method that combines
+// AddNodes and ConnectNodesRing.
+func (s *Simulation) AddNodesAndConnectRing(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
+	if count < 2 {
+		return nil, errors.New("count of nodes must be at least 2")
+	}
+	ids, err = s.AddNodes(count, opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = s.ConnectNodesRing(ids)
+	if err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// AddNodesAndConnectStar is a helper method that combines
+// AddNodes and ConnectNodesStar.
+func (s *Simulation) AddNodesAndConnectStar(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
+	if count < 2 {
+		return nil, errors.New("count of nodes must be at least 2")
+	}
+	ids, err = s.AddNodes(count, opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = s.ConnectNodesStar(ids[0], ids[1:])
+	if err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+//UploadSnapshot uploads a snapshot to the simulation.
+//This method tries to open the json file provided, applies the config to all nodes
+//and then loads the snapshot into the Simulation network.
+func (s *Simulation) UploadSnapshot(snapshotFile string, opts ...AddNodeOption) error {
+	f, err := os.Open(snapshotFile)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		err := f.Close()
+		if err != nil {
+			log.Error("Error closing snapshot file", "err", err)
+		}
+	}()
+	jsonbyte, err := ioutil.ReadAll(f)
+	if err != nil {
+		return err
+	}
+	var snap simulations.Snapshot
+	err = json.Unmarshal(jsonbyte, &snap)
+	if err != nil {
+		return err
+	}
+
+	//the snapshot probably has the property EnableMsgEvents not set
+	//just in case, set it to true!
+	//(we need this to wait for messages before uploading)
+	for _, n := range snap.Nodes {
+		n.Node.Config.EnableMsgEvents = true
+		n.Node.Config.Services = s.serviceNames
+		for _, o := range opts {
+			o(n.Node.Config)
+		}
+	}
+
+	log.Info("Waiting for p2p connections to be established...")
+
+	//now we can load the snapshot
+	err = s.Net.Load(&snap)
+	if err != nil {
+		return err
+	}
+	log.Info("Snapshot loaded")
+	return nil
+}
+
+// SetPivotNode sets the NodeID of the network's pivot node.
+// The pivot node is a specific node that should be treated
+// differently than other nodes in a test. SetPivotNode and
+// PivotNodeID are convenience functions to set and
+// retrieve it.
+func (s *Simulation) SetPivotNode(id discover.NodeID) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.pivotNodeID = &id
+}
+
+// PivotNodeID returns the NodeID of the pivot node set by
+// the Simulation.SetPivotNode method.
+func (s *Simulation) PivotNodeID() (id *discover.NodeID) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.pivotNodeID
+}
+
+// StartNode starts a node by NodeID.
+func (s *Simulation) StartNode(id discover.NodeID) (err error) {
+	return s.Net.Start(id)
+}
+
+// StartRandomNode starts a random node.
+func (s *Simulation) StartRandomNode() (id discover.NodeID, err error) {
+	n := s.randomDownNode()
+	if n == nil {
+		return id, ErrNodeNotFound
+	}
+	return n.ID, s.Net.Start(n.ID)
+}
+
+// StartRandomNodes starts random nodes.
+func (s *Simulation) StartRandomNodes(count int) (ids []discover.NodeID, err error) {
+	ids = make([]discover.NodeID, 0, count)
+	downIDs := s.DownNodeIDs()
+	for i := 0; i < count; i++ {
+		n := s.randomNode(downIDs, ids...)
+ if n == nil { + return nil, ErrNodeNotFound + } + err = s.Net.Start(n.ID) + if err != nil { + return nil, err + } + ids = append(ids, n.ID) + } + return ids, nil +} + +// StopNode stops a node by NodeID. +func (s *Simulation) StopNode(id discover.NodeID) (err error) { + return s.Net.Stop(id) +} + +// StopRandomNode stops a random node. +func (s *Simulation) StopRandomNode() (id discover.NodeID, err error) { + n := s.RandomUpNode() + if n == nil { + return id, ErrNodeNotFound + } + return n.ID, s.Net.Stop(n.ID) +} + +// StopRandomNodes stops random nodes. +func (s *Simulation) StopRandomNodes(count int) (ids []discover.NodeID, err error) { + ids = make([]discover.NodeID, 0, count) + upIDs := s.UpNodeIDs() + for i := 0; i < count; i++ { + n := s.randomNode(upIDs, ids...) + if n == nil { + return nil, ErrNodeNotFound + } + err = s.Net.Stop(n.ID) + if err != nil { + return nil, err + } + ids = append(ids, n.ID) + } + return ids, nil +} + +// seed the random generator for Simulation.randomNode. +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// RandomUpNode returns a random SimNode that is up. +// Arguments are NodeIDs for nodes that should not be returned. +func (s *Simulation) RandomUpNode(exclude ...discover.NodeID) *adapters.SimNode { + return s.randomNode(s.UpNodeIDs(), exclude...) +} + +// randomDownNode returns a random SimNode that is not up. +func (s *Simulation) randomDownNode(exclude ...discover.NodeID) *adapters.SimNode { + return s.randomNode(s.DownNodeIDs(), exclude...) +} + +// randomNode returns a random SimNode from the slice of NodeIDs. +func (s *Simulation) randomNode(ids []discover.NodeID, exclude ...discover.NodeID) *adapters.SimNode { + for _, e := range exclude { + var i int + for _, id := range ids { + if id == e { + ids = append(ids[:i], ids[i+1:]...) + } else { + i++ + } + } + } + l := len(ids) + if l == 0 { + return nil + } + n := s.Net.GetNode(ids[rand.Intn(l)]) + node, _ := n.Node.(*adapters.SimNode) + return node +} diff --git a/swarm/network/simulation/node_test.go b/swarm/network/simulation/node_test.go new file mode 100644 index 000000000000..94f0b4fac05d --- /dev/null +++ b/swarm/network/simulation/node_test.go @@ -0,0 +1,462 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
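+
+// The tests below exercise the node helpers from node.go. AddNodeOption
+// values can be combined; a minimal sketch (illustrative only, the "bzz"
+// service name is an assumption):
+//
+//	id, err := sim.AddNode(
+//		AddNodeWithMsgEvents(true),
+//		AddNodeWithService("bzz"),
+//	)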
+ +package simulation + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" + "github.com/ethereum/go-ethereum/swarm/network" +) + +func TestUpDownNodeIDs(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + ids, err := sim.AddNodes(10) + if err != nil { + t.Fatal(err) + } + + gotIDs := sim.NodeIDs() + + if !equalNodeIDs(ids, gotIDs) { + t.Error("returned nodes are not equal to added ones") + } + + stoppedIDs, err := sim.StopRandomNodes(3) + if err != nil { + t.Fatal(err) + } + + gotIDs = sim.UpNodeIDs() + + for _, id := range gotIDs { + if !sim.Net.GetNode(id).Up { + t.Errorf("node %s should not be down", id) + } + } + + if !equalNodeIDs(ids, append(gotIDs, stoppedIDs...)) { + t.Error("returned nodes are not equal to added ones") + } + + gotIDs = sim.DownNodeIDs() + + for _, id := range gotIDs { + if sim.Net.GetNode(id).Up { + t.Errorf("node %s should not be up", id) + } + } + + if !equalNodeIDs(stoppedIDs, gotIDs) { + t.Error("returned nodes are not equal to the stopped ones") + } +} + +func equalNodeIDs(one, other []discover.NodeID) bool { + if len(one) != len(other) { + return false + } + var count int + for _, a := range one { + var found bool + for _, b := range other { + if a == b { + found = true + break + } + } + if found { + count++ + } else { + return false + } + } + return count == len(one) +} + +func TestAddNode(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + id, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } + + n := sim.Net.GetNode(id) + if n == nil { + t.Fatal("node not found") + } + + if !n.Up { + t.Error("node not started") + } +} + +func TestAddNodeWithMsgEvents(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + id, err := sim.AddNode(AddNodeWithMsgEvents(true)) + if err != nil { + t.Fatal(err) + } + + if !sim.Net.GetNode(id).Config.EnableMsgEvents { + t.Error("EnableMsgEvents is false") + } + + id, err = sim.AddNode(AddNodeWithMsgEvents(false)) + if err != nil { + t.Fatal(err) + } + + if sim.Net.GetNode(id).Config.EnableMsgEvents { + t.Error("EnableMsgEvents is true") + } +} + +func TestAddNodeWithService(t *testing.T) { + sim := New(map[string]ServiceFunc{ + "noop1": noopServiceFunc, + "noop2": noopServiceFunc, + }) + defer sim.Close() + + id, err := sim.AddNode(AddNodeWithService("noop1")) + if err != nil { + t.Fatal(err) + } + + n := sim.Net.GetNode(id).Node.(*adapters.SimNode) + if n.Service("noop1") == nil { + t.Error("service noop1 not found on node") + } + if n.Service("noop2") != nil { + t.Error("service noop2 should not be found on node") + } +} + +func TestAddNodes(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + nodesCount := 12 + + ids, err := sim.AddNodes(nodesCount) + if err != nil { + t.Fatal(err) + } + + count := len(ids) + if count != nodesCount { + t.Errorf("expected %v nodes, got %v", nodesCount, count) + } + + count = len(sim.Net.GetNodes()) + if count != nodesCount { + t.Errorf("expected %v nodes, got %v", nodesCount, count) + } +} + +func TestAddNodesAndConnectFull(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + n := 12 + + ids, err := sim.AddNodesAndConnectFull(n) + if err != nil { + t.Fatal(err) + } + + testFull(t, sim, ids) +} + +func TestAddNodesAndConnectChain(t *testing.T) { + sim 
:= New(noopServiceFuncMap) + defer sim.Close() + + _, err := sim.AddNodesAndConnectChain(12) + if err != nil { + t.Fatal(err) + } + + // add another set of nodes to test + // if two chains are connected + _, err = sim.AddNodesAndConnectChain(7) + if err != nil { + t.Fatal(err) + } + + testChain(t, sim, sim.UpNodeIDs()) +} + +func TestAddNodesAndConnectRing(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + ids, err := sim.AddNodesAndConnectRing(12) + if err != nil { + t.Fatal(err) + } + + testRing(t, sim, ids) +} + +func TestAddNodesAndConnectStar(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + ids, err := sim.AddNodesAndConnectStar(12) + if err != nil { + t.Fatal(err) + } + + testStar(t, sim, ids, 0) +} + +//To test that uploading a snapshot works +func TestUploadSnapshot(t *testing.T) { + log.Debug("Creating simulation") + s := New(map[string]ServiceFunc{ + "bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) { + addr := network.NewAddrFromNodeID(ctx.Config.ID) + hp := network.NewHiveParams() + hp.Discovery = false + config := &network.BzzConfig{ + OverlayAddr: addr.Over(), + UnderlayAddr: addr.Under(), + HiveParams: hp, + } + kad := network.NewKademlia(addr.Over(), network.NewKadParams()) + return network.NewBzz(config, kad, nil, nil, nil), nil, nil + }, + }) + defer s.Close() + + nodeCount := 16 + log.Debug("Uploading snapshot") + err := s.UploadSnapshot(fmt.Sprintf("../stream/testing/snapshot_%d.json", nodeCount)) + if err != nil { + t.Fatalf("Error uploading snapshot to simulation network: %v", err) + } + + ctx := context.Background() + log.Debug("Starting simulation...") + s.Run(ctx, func(ctx context.Context, sim *Simulation) error { + log.Debug("Checking") + nodes := sim.UpNodeIDs() + if len(nodes) != nodeCount { + t.Fatal("Simulation network node number doesn't match snapshot node number") + } + return nil + }) + log.Debug("Done.") +} + +func TestPivotNode(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + id, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } + + id2, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } + + if sim.PivotNodeID() != nil { + t.Error("expected no pivot node") + } + + sim.SetPivotNode(id) + + pid := sim.PivotNodeID() + + if pid == nil { + t.Error("pivot node not set") + } else if *pid != id { + t.Errorf("expected pivot node %s, got %s", id, *pid) + } + + sim.SetPivotNode(id2) + + pid = sim.PivotNodeID() + + if pid == nil { + t.Error("pivot node not set") + } else if *pid != id2 { + t.Errorf("expected pivot node %s, got %s", id2, *pid) + } +} + +func TestStartStopNode(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + id, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } + + n := sim.Net.GetNode(id) + if n == nil { + t.Fatal("node not found") + } + if !n.Up { + t.Error("node not started") + } + + err = sim.StopNode(id) + if err != nil { + t.Fatal(err) + } + if n.Up { + t.Error("node not stopped") + } + + // Sleep here to ensure that Network.watchPeerEvents defer function + // has set the `node.Up = false` before we start the node again. + // p2p/simulations/network.go:215 + // + // The same node is stopped and started again, and upon start + // watchPeerEvents is started in a goroutine. If the node is stopped + // and then very quickly started, that goroutine may be scheduled later + // then start and force `node.Up = false` in its defer function. + // This will make this test unreliable. 
+ time.Sleep(time.Second) + + err = sim.StartNode(id) + if err != nil { + t.Fatal(err) + } + if !n.Up { + t.Error("node not started") + } +} + +func TestStartStopRandomNode(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + _, err := sim.AddNodes(3) + if err != nil { + t.Fatal(err) + } + + id, err := sim.StopRandomNode() + if err != nil { + t.Fatal(err) + } + + n := sim.Net.GetNode(id) + if n == nil { + t.Fatal("node not found") + } + if n.Up { + t.Error("node not stopped") + } + + id2, err := sim.StopRandomNode() + if err != nil { + t.Fatal(err) + } + + // Sleep here to ensure that Network.watchPeerEvents defer function + // has set the `node.Up = false` before we start the node again. + // p2p/simulations/network.go:215 + // + // The same node is stopped and started again, and upon start + // watchPeerEvents is started in a goroutine. If the node is stopped + // and then very quickly started, that goroutine may be scheduled later + // then start and force `node.Up = false` in its defer function. + // This will make this test unreliable. + time.Sleep(time.Second) + + idStarted, err := sim.StartRandomNode() + if err != nil { + t.Fatal(err) + } + + if idStarted != id && idStarted != id2 { + t.Error("unexpected started node ID") + } +} + +func TestStartStopRandomNodes(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + _, err := sim.AddNodes(10) + if err != nil { + t.Fatal(err) + } + + ids, err := sim.StopRandomNodes(3) + if err != nil { + t.Fatal(err) + } + + for _, id := range ids { + n := sim.Net.GetNode(id) + if n == nil { + t.Fatal("node not found") + } + if n.Up { + t.Error("node not stopped") + } + } + + // Sleep here to ensure that Network.watchPeerEvents defer function + // has set the `node.Up = false` before we start the node again. + // p2p/simulations/network.go:215 + // + // The same node is stopped and started again, and upon start + // watchPeerEvents is started in a goroutine. If the node is stopped + // and then very quickly started, that goroutine may be scheduled later + // then start and force `node.Up = false` in its defer function. + // This will make this test unreliable. + time.Sleep(time.Second) + + ids, err = sim.StartRandomNodes(2) + if err != nil { + t.Fatal(err) + } + + for _, id := range ids { + n := sim.Net.GetNode(id) + if n == nil { + t.Fatal("node not found") + } + if !n.Up { + t.Error("node not started") + } + } +} diff --git a/swarm/network/simulation/service.go b/swarm/network/simulation/service.go new file mode 100644 index 000000000000..02e7ad0cc91e --- /dev/null +++ b/swarm/network/simulation/service.go @@ -0,0 +1,65 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
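+
+// The accessors below look up services constructed by the ServiceFunc
+// functions passed to New. A minimal sketch of typical use (illustrative
+// only; the "bzz" name and the *network.Bzz type are assumptions):
+//
+//	if bzz, ok := sim.Service("bzz", id).(*network.Bzz); ok {
+//		// use the service instance of that particular node
+//		_ = bzz
+//	}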
+ +package simulation + +import ( + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" +) + +// Service returns a single Service by name on a particular node +// with provided id. +func (s *Simulation) Service(name string, id discover.NodeID) node.Service { + simNode, ok := s.Net.GetNode(id).Node.(*adapters.SimNode) + if !ok { + return nil + } + services := simNode.ServiceMap() + if len(services) == 0 { + return nil + } + return services[name] +} + +// RandomService returns a single Service by name on a +// randomly chosen node that is up. +func (s *Simulation) RandomService(name string) node.Service { + n := s.RandomUpNode() + if n == nil { + return nil + } + return n.Service(name) +} + +// Services returns all services with a provided name +// from nodes that are up. +func (s *Simulation) Services(name string) (services map[discover.NodeID]node.Service) { + nodes := s.Net.GetNodes() + services = make(map[discover.NodeID]node.Service) + for _, node := range nodes { + if !node.Up { + continue + } + simNode, ok := node.Node.(*adapters.SimNode) + if !ok { + continue + } + services[node.ID()] = simNode.Service(name) + } + return services +} diff --git a/crypto/randentropy/rand_entropy.go b/swarm/network/simulation/service_test.go similarity index 58% rename from crypto/randentropy/rand_entropy.go rename to swarm/network/simulation/service_test.go index 539d3ac894f2..23b0d86f243f 100644 --- a/crypto/randentropy/rand_entropy.go +++ b/swarm/network/simulation/service_test.go @@ -1,4 +1,4 @@ -// Copyright 2015 The go-ethereum Authors +// Copyright 2018 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -14,29 +14,33 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package randentropy +package simulation import ( - crand "crypto/rand" - "io" + "testing" ) -var Reader io.Reader = &randEntropy{} +func TestService(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() -type randEntropy struct { -} + id, err := sim.AddNode() + if err != nil { + t.Fatal(err) + } -func (*randEntropy) Read(bytes []byte) (n int, err error) { - readBytes := GetEntropyCSPRNG(len(bytes)) - copy(bytes, readBytes) - return len(bytes), nil -} + _, ok := sim.Service("noop", id).(*noopService) + if !ok { + t.Fatalf("service is not of %T type", &noopService{}) + } -func GetEntropyCSPRNG(n int) []byte { - mainBuff := make([]byte, n) - _, err := io.ReadFull(crand.Reader, mainBuff) - if err != nil { - panic("reading from crypto/rand failed: " + err.Error()) + _, ok = sim.RandomService("noop").(*noopService) + if !ok { + t.Fatalf("service is not of %T type", &noopService{}) + } + + _, ok = sim.Services("noop")[id].(*noopService) + if !ok { + t.Fatalf("service is not of %T type", &noopService{}) } - return mainBuff } diff --git a/swarm/network/simulation/simulation.go b/swarm/network/simulation/simulation.go new file mode 100644 index 000000000000..74f9d98ee93f --- /dev/null +++ b/swarm/network/simulation/simulation.go @@ -0,0 +1,216 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "context" + "errors" + "net/http" + "sync" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" +) + +// Common errors that are returned by functions in this package. +var ( + ErrNodeNotFound = errors.New("node not found") + ErrNoPivotNode = errors.New("no pivot node set") +) + +// Simulation provides methods on network, nodes and services +// to manage them. +type Simulation struct { + // Net is exposed as a way to access lower level functionalities + // of p2p/simulations.Network. + Net *simulations.Network + + serviceNames []string + cleanupFuncs []func() + buckets map[discover.NodeID]*sync.Map + pivotNodeID *discover.NodeID + shutdownWG sync.WaitGroup + done chan struct{} + mu sync.RWMutex + + httpSrv *http.Server //attach a HTTP server via SimulationOptions + handler *simulations.Server //HTTP handler for the server + runC chan struct{} //channel where frontend signals it is ready +} + +// ServiceFunc is used in New to declare new service constructor. +// The first argument provides ServiceContext from the adapters package +// giving for example the access to NodeID. Second argument is the sync.Map +// where all "global" state related to the service should be kept. +// All cleanups needed for constructed service and any other constructed +// objects should ne provided in a single returned cleanup function. +// Returned cleanup function will be called by Close function +// after network shutdown. +type ServiceFunc func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) + +// New creates a new Simulation instance with new +// simulations.Network initialized with provided services. +func New(services map[string]ServiceFunc) (s *Simulation) { + s = &Simulation{ + buckets: make(map[discover.NodeID]*sync.Map), + done: make(chan struct{}), + } + + adapterServices := make(map[string]adapters.ServiceFunc, len(services)) + for name, serviceFunc := range services { + s.serviceNames = append(s.serviceNames, name) + adapterServices[name] = func(ctx *adapters.ServiceContext) (node.Service, error) { + b := new(sync.Map) + service, cleanup, err := serviceFunc(ctx, b) + if err != nil { + return nil, err + } + s.mu.Lock() + defer s.mu.Unlock() + if cleanup != nil { + s.cleanupFuncs = append(s.cleanupFuncs, cleanup) + } + s.buckets[ctx.Config.ID] = b + return service, nil + } + } + + s.Net = simulations.NewNetwork( + adapters.NewSimAdapter(adapterServices), + &simulations.NetworkConfig{ID: "0"}, + ) + + return s +} + +// RunFunc is the function that will be called +// on Simulation.Run method call. 
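+//
+// A minimal sketch of how a RunFunc is typically passed to Run
+// (illustrative only, assuming a Simulation value sim and a context ctx):
+//
+//	result := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
+//		_, err := sim.AddNodesAndConnectChain(4)
+//		return err
+//	})
+//	if result.Error != nil {
+//		// handle a failed or cancelled run
+//	}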
+type RunFunc func(context.Context, *Simulation) error + +// Result is the returned value of Simulation.Run method. +type Result struct { + Duration time.Duration + Error error +} + +// Run calls the RunFunc function while taking care of +// cancelation provided through the Context. +func (s *Simulation) Run(ctx context.Context, f RunFunc) (r Result) { + //if the option is set to run a HTTP server with the simulation, + //init the server and start it + start := time.Now() + if s.httpSrv != nil { + log.Info("Waiting for frontend to be ready...(send POST /runsim to HTTP server)") + //wait for the frontend to connect + select { + case <-s.runC: + case <-ctx.Done(): + return Result{ + Duration: time.Since(start), + Error: ctx.Err(), + } + } + log.Info("Received signal from frontend - starting simulation run.") + } + errc := make(chan error) + quit := make(chan struct{}) + defer close(quit) + go func() { + select { + case errc <- f(ctx, s): + case <-quit: + } + }() + var err error + select { + case <-ctx.Done(): + err = ctx.Err() + case err = <-errc: + } + return Result{ + Duration: time.Since(start), + Error: err, + } +} + +// Maximal number of parallel calls to cleanup functions on +// Simulation.Close. +var maxParallelCleanups = 10 + +// Close calls all cleanup functions that are returned by +// ServiceFunc, waits for all of them to finish and other +// functions that explicitly block shutdownWG +// (like Simulation.PeerEvents) and shuts down the network +// at the end. It is used to clean all resources from the +// simulation. +func (s *Simulation) Close() { + close(s.done) + + // Close all connections before calling the Network Shutdown. + // It is possible that p2p.Server.Stop will block if there are + // existing connections. + for _, c := range s.Net.Conns { + if c.Up { + s.Net.Disconnect(c.One, c.Other) + } + } + s.shutdownWG.Wait() + s.Net.Shutdown() + + sem := make(chan struct{}, maxParallelCleanups) + s.mu.RLock() + cleanupFuncs := make([]func(), len(s.cleanupFuncs)) + for i, f := range s.cleanupFuncs { + if f != nil { + cleanupFuncs[i] = f + } + } + s.mu.RUnlock() + var cleanupWG sync.WaitGroup + for _, cleanup := range cleanupFuncs { + cleanupWG.Add(1) + sem <- struct{}{} + go func(cleanup func()) { + defer cleanupWG.Done() + defer func() { <-sem }() + + cleanup() + }(cleanup) + } + cleanupWG.Wait() + + if s.httpSrv != nil { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + err := s.httpSrv.Shutdown(ctx) + if err != nil { + log.Error("Error shutting down HTTP server!", "err", err) + } + close(s.runC) + } +} + +// Done returns a channel that is closed when the simulation +// is closed by Close method. It is useful for signaling termination +// of all possible goroutines that are created within the test. +func (s *Simulation) Done() <-chan struct{} { + return s.done +} diff --git a/swarm/network/simulation/simulation_test.go b/swarm/network/simulation/simulation_test.go new file mode 100644 index 000000000000..eed09bf508d5 --- /dev/null +++ b/swarm/network/simulation/simulation_test.go @@ -0,0 +1,207 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package simulation + +import ( + "context" + "errors" + "flag" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" + "github.com/ethereum/go-ethereum/rpc" + colorable "github.com/mattn/go-colorable" +) + +var ( + loglevel = flag.Int("loglevel", 2, "verbosity of logs") +) + +func init() { + flag.Parse() + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) +} + +// TestRun tests if Run method calls RunFunc and if it handles context properly. +func TestRun(t *testing.T) { + sim := New(noopServiceFuncMap) + defer sim.Close() + + t.Run("call", func(t *testing.T) { + expect := "something" + var got string + r := sim.Run(context.Background(), func(ctx context.Context, sim *Simulation) error { + got = expect + return nil + }) + + if r.Error != nil { + t.Errorf("unexpected error: %v", r.Error) + } + if got != expect { + t.Errorf("expected %q, got %q", expect, got) + } + }) + + t.Run("cancellation", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + r := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error { + time.Sleep(time.Second) + return nil + }) + + if r.Error != context.DeadlineExceeded { + t.Errorf("unexpected error: %v", r.Error) + } + }) + + t.Run("context value and duration", func(t *testing.T) { + ctx := context.WithValue(context.Background(), "hey", "there") + sleep := 50 * time.Millisecond + + r := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error { + if ctx.Value("hey") != "there" { + return errors.New("expected context value not passed") + } + time.Sleep(sleep) + return nil + }) + + if r.Error != nil { + t.Errorf("unexpected error: %v", r.Error) + } + if r.Duration < sleep { + t.Errorf("reported run duration less then expected: %s", r.Duration) + } + }) +} + +// TestClose tests are Close method triggers all close functions and are all nodes not up anymore. 
+func TestClose(t *testing.T) {
+	var mu sync.Mutex
+	var cleanupCount int
+
+	sleep := 50 * time.Millisecond
+
+	sim := New(map[string]ServiceFunc{
+		"noop": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+			return newNoopService(), func() {
+				time.Sleep(sleep)
+				mu.Lock()
+				defer mu.Unlock()
+				cleanupCount++
+			}, nil
+		},
+	})
+
+	nodeCount := 30
+
+	_, err := sim.AddNodes(nodeCount)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var upNodeCount int
+	for _, n := range sim.Net.GetNodes() {
+		if n.Up {
+			upNodeCount++
+		}
+	}
+	if upNodeCount != nodeCount {
+		t.Errorf("all nodes should be up, instead only %v are up", upNodeCount)
+	}
+
+	sim.Close()
+
+	if cleanupCount != nodeCount {
+		t.Errorf("number of cleanups expected %v, got %v", nodeCount, cleanupCount)
+	}
+
+	upNodeCount = 0
+	for _, n := range sim.Net.GetNodes() {
+		if n.Up {
+			upNodeCount++
+		}
+	}
+	if upNodeCount != 0 {
+		t.Errorf("all nodes should be down, instead %v are up", upNodeCount)
+	}
+}
+
+// TestDone checks whether the Close method triggers the closing of the done channel.
+func TestDone(t *testing.T) {
+	sim := New(noopServiceFuncMap)
+	sleep := 50 * time.Millisecond
+	timeout := 2 * time.Second
+
+	start := time.Now()
+	go func() {
+		time.Sleep(sleep)
+		sim.Close()
+	}()
+
+	select {
+	case <-time.After(timeout):
+		t.Error("done channel closing timed out")
+	case <-sim.Done():
+		if d := time.Since(start); d < sleep {
+			t.Errorf("done channel closed sooner than expected: %s", d)
+		}
+	}
+}
+
+// a helper map of services that do not do anything
+var noopServiceFuncMap = map[string]ServiceFunc{
+	"noop": noopServiceFunc,
+}
+
+// a helper function for the most basic noop service
+func noopServiceFunc(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+	return newNoopService(), nil, nil
+}
+
+// noopService is the service that does not do anything
+// but implements node.Service interface.
+type noopService struct{} + +func newNoopService() node.Service { + return &noopService{} +} + +func (t *noopService) Protocols() []p2p.Protocol { + return []p2p.Protocol{} +} + +func (t *noopService) APIs() []rpc.API { + return []rpc.API{} +} + +func (t *noopService) Start(server *p2p.Server) error { + return nil +} + +func (t *noopService) Stop() error { + return nil +} diff --git a/swarm/network/stream/common_test.go b/swarm/network/stream/common_test.go index 9d1f997f29fb..491dc9fd5855 100644 --- a/swarm/network/stream/common_test.go +++ b/swarm/network/stream/common_test.go @@ -18,135 +18,70 @@ package stream import ( "context" - "encoding/binary" + crand "crypto/rand" "errors" "flag" "fmt" "io" "io/ioutil" + "math/rand" "os" + "strings" "sync/atomic" "testing" "time" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/simulations/adapters" p2ptest "github.com/ethereum/go-ethereum/p2p/testing" - "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/swarm/network" + "github.com/ethereum/go-ethereum/swarm/network/simulation" + "github.com/ethereum/go-ethereum/swarm/pot" "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/mock" - "github.com/ethereum/go-ethereum/swarm/storage/mock/db" + mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db" colorable "github.com/mattn/go-colorable" ) var ( - deliveries map[discover.NodeID]*Delivery - stores map[discover.NodeID]storage.ChunkStore - toAddr func(discover.NodeID) *network.BzzAddr - peerCount func(discover.NodeID) int - adapter = flag.String("adapter", "sim", "type of simulation: sim|exec|docker") loglevel = flag.Int("loglevel", 2, "verbosity of logs") nodes = flag.Int("nodes", 0, "number of nodes") chunks = flag.Int("chunks", 0, "number of chunks") useMockStore = flag.Bool("mockstore", false, "disabled mock store (default: enabled)") -) + longrunning = flag.Bool("longrunning", false, "do run long-running tests") -var ( - defaultSkipCheck bool - waitPeerErrC chan error - chunkSize = 4096 - registries map[discover.NodeID]*TestRegistry - createStoreFunc func(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) - getRetrieveFunc = defaultRetrieveFunc - subscriptionCount = 0 - globalStore mock.GlobalStorer - globalStoreDir string -) + bucketKeyDB = simulation.BucketKey("db") + bucketKeyStore = simulation.BucketKey("store") + bucketKeyFileStore = simulation.BucketKey("filestore") + bucketKeyNetStore = simulation.BucketKey("netstore") + bucketKeyDelivery = simulation.BucketKey("delivery") + bucketKeyRegistry = simulation.BucketKey("registry") -var services = adapters.Services{ - "streamer": NewStreamerService, - "intervalsStreamer": newIntervalsStreamerService, -} + chunkSize = 4096 + pof = pot.DefaultPof(256) +) func init() { flag.Parse() - // register the Delivery service which will run as a devp2p - // protocol when using the exec adapter - adapters.RegisterServices(services) + rand.Seed(time.Now().UnixNano()) log.PrintOrigins(true) log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) } -func 
createGlobalStore() { - var err error - globalStoreDir, err = ioutil.TempDir("", "global.store") +func createGlobalStore() (string, *mockdb.GlobalStore, error) { + var globalStore *mockdb.GlobalStore + globalStoreDir, err := ioutil.TempDir("", "global.store") if err != nil { log.Error("Error initiating global store temp directory!", "err", err) - return + return "", nil, err } - globalStore, err = db.NewGlobalStore(globalStoreDir) + globalStore, err = mockdb.NewGlobalStore(globalStoreDir) if err != nil { log.Error("Error initiating global store!", "err", err) + return "", nil, err } -} - -// NewStreamerService -func NewStreamerService(ctx *adapters.ServiceContext) (node.Service, error) { - var err error - id := ctx.Config.ID - addr := toAddr(id) - kad := network.NewKademlia(addr.Over(), network.NewKadParams()) - stores[id], err = createStoreFunc(id, addr) - if err != nil { - return nil, err - } - store := stores[id].(*storage.LocalStore) - db := storage.NewDBAPI(store) - delivery := NewDelivery(kad, db) - deliveries[id] = delivery - r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ - SkipCheck: defaultSkipCheck, - DoRetrieve: false, - }) - RegisterSwarmSyncerServer(r, db) - RegisterSwarmSyncerClient(r, db) - go func() { - waitPeerErrC <- waitForPeers(r, 1*time.Second, peerCount(id)) - }() - fileStore := storage.NewFileStore(storage.NewNetStore(store, getRetrieveFunc(id)), storage.NewFileStoreParams()) - testRegistry := &TestRegistry{Registry: r, fileStore: fileStore} - registries[id] = testRegistry - return testRegistry, nil -} - -func defaultRetrieveFunc(id discover.NodeID) func(chunk *storage.Chunk) error { - return nil -} - -func datadirsCleanup() { - for _, id := range ids { - os.RemoveAll(datadirs[id]) - } - if globalStoreDir != "" { - os.RemoveAll(globalStoreDir) - } -} - -//local stores need to be cleaned up after the sim is done -func localStoreCleanup() { - log.Info("Cleaning up...") - for _, id := range ids { - registries[id].Close() - stores[id].Close() - } - log.Info("Local store cleanup done") + return globalStoreDir, globalStore, nil } func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) { @@ -174,9 +109,7 @@ func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *stora db := storage.NewDBAPI(localStore) delivery := NewDelivery(to, db) - streamer := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ - SkipCheck: defaultSkipCheck, - }) + streamer := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), nil) teardown := func() { streamer.Close() removeDataDir() @@ -217,14 +150,14 @@ func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore { } } -func (rrs *roundRobinStore) Get(addr storage.Address) (*storage.Chunk, error) { +func (rrs *roundRobinStore) Get(ctx context.Context, addr storage.Address) (*storage.Chunk, error) { return nil, errors.New("get not well defined on round robin store") } -func (rrs *roundRobinStore) Put(chunk *storage.Chunk) { +func (rrs *roundRobinStore) Put(ctx context.Context, chunk *storage.Chunk) { i := atomic.AddUint32(&rrs.index, 1) idx := int(i) % len(rrs.stores) - rrs.stores[idx].Put(chunk) + rrs.stores[idx].Put(ctx, chunk) } func (rrs *roundRobinStore) Close() { @@ -233,24 +166,8 @@ func (rrs *roundRobinStore) Close() { } } -type TestRegistry struct { - *Registry - fileStore *storage.FileStore -} - -func (r *TestRegistry) APIs() []rpc.API { - a := r.Registry.APIs() - a = append(a, rpc.API{ - 
Namespace: "stream", - Version: "3.0", - Service: r, - Public: true, - }) - return a -} - func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) { - r, _ := fileStore.Retrieve(hash) + r, _ := fileStore.Retrieve(context.TODO(), hash) buf := make([]byte, 1024) var n int var total int64 @@ -265,185 +182,74 @@ func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) { return total, nil } -func (r *TestRegistry) ReadAll(hash common.Hash) (int64, error) { - return readAll(r.fileStore, hash[:]) -} - -func (r *TestRegistry) Start(server *p2p.Server) error { - return r.Registry.Start(server) -} - -func (r *TestRegistry) Stop() error { - return r.Registry.Stop() -} - -type TestExternalRegistry struct { - *Registry -} - -func (r *TestExternalRegistry) APIs() []rpc.API { - a := r.Registry.APIs() - a = append(a, rpc.API{ - Namespace: "stream", - Version: "3.0", - Service: r, - Public: true, - }) - return a -} - -func (r *TestExternalRegistry) GetHashes(ctx context.Context, peerId discover.NodeID, s Stream) (*rpc.Subscription, error) { - peer := r.getPeer(peerId) - - client, err := peer.getClient(ctx, s) - if err != nil { - return nil, err - } - - c := client.Client.(*testExternalClient) +func uploadFilesToNodes(sim *simulation.Simulation) ([]storage.Address, []string, error) { + nodes := sim.UpNodeIDs() + nodeCnt := len(nodes) + log.Debug(fmt.Sprintf("Uploading %d files to nodes", nodeCnt)) + //array holding generated files + rfiles := make([]string, nodeCnt) + //array holding the root hashes of the files + rootAddrs := make([]storage.Address, nodeCnt) - notifier, supported := rpc.NotifierFromContext(ctx) - if !supported { - return nil, fmt.Errorf("Subscribe not supported") - } - - sub := notifier.CreateSubscription() - - go func() { - // if we begin sending event immediately some events - // will probably be dropped since the subscription ID might not be send to - // the client. - // ref: rpc/subscription_test.go#L65 - time.Sleep(1 * time.Second) - for { - select { - case h := <-c.hashes: - <-c.enableNotificationsC // wait for notification subscription to complete - if err := notifier.Notify(sub.ID, h); err != nil { - log.Warn(fmt.Sprintf("rpc sub notifier notify stream %s: %v", s, err)) - } - case err := <-sub.Err(): - if err != nil { - log.Warn(fmt.Sprintf("caught subscription error in stream %s: %v", s, err)) - } - case <-notifier.Closed(): - log.Trace(fmt.Sprintf("rpc sub notifier closed")) - return - } + var err error + //for every node, generate a file and upload + for i, id := range nodes { + item, ok := sim.NodeItem(id, bucketKeyFileStore) + if !ok { + return nil, nil, fmt.Errorf("Error accessing localstore") } - }() - - return sub, nil -} - -func (r *TestExternalRegistry) EnableNotifications(peerId discover.NodeID, s Stream) error { - peer := r.getPeer(peerId) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - client, err := peer.getClient(ctx, s) - if err != nil { - return err - } - - close(client.Client.(*testExternalClient).enableNotificationsC) - - return nil -} - -// TODO: merge functionalities of testExternalClient and testExternalServer -// with testClient and testServer. 
- -type testExternalClient struct { - hashes chan []byte - db *storage.DBAPI - enableNotificationsC chan struct{} -} - -func newTestExternalClient(db *storage.DBAPI) *testExternalClient { - return &testExternalClient{ - hashes: make(chan []byte), - db: db, - enableNotificationsC: make(chan struct{}), - } -} - -func (c *testExternalClient) NeedData(hash []byte) func() { - chunk, _ := c.db.GetOrCreateRequest(hash) - if chunk.ReqC == nil { - return nil - } - c.hashes <- hash - return func() { - chunk.WaitToStore() + fileStore := item.(*storage.FileStore) + //generate a file + rfiles[i], err = generateRandomFile() + if err != nil { + return nil, nil, err + } + //store it (upload it) on the FileStore + ctx := context.TODO() + rk, wait, err := fileStore.Store(ctx, strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false) + log.Debug("Uploaded random string file to node") + if err != nil { + return nil, nil, err + } + err = wait(ctx) + if err != nil { + return nil, nil, err + } + rootAddrs[i] = rk } + return rootAddrs, rfiles, nil } -func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) { - return nil -} - -func (c *testExternalClient) Close() {} - -const testExternalServerBatchSize = 10 - -type testExternalServer struct { - t string - keyFunc func(key []byte, index uint64) - sessionAt uint64 - maxKeys uint64 - streamer *TestExternalRegistry -} - -func newTestExternalServer(t string, sessionAt, maxKeys uint64, keyFunc func(key []byte, index uint64)) *testExternalServer { - if keyFunc == nil { - keyFunc = binary.BigEndian.PutUint64 - } - return &testExternalServer{ - t: t, - keyFunc: keyFunc, - sessionAt: sessionAt, - maxKeys: maxKeys, +//generate a random file (string) +func generateRandomFile() (string, error) { + //generate a random file size between minFileSize and maxFileSize + fileSize := rand.Intn(maxFileSize-minFileSize) + minFileSize + log.Debug(fmt.Sprintf("Generated file with filesize %d kB", fileSize)) + b := make([]byte, fileSize*1024) + _, err := crand.Read(b) + if err != nil { + log.Error("Error generating random file.", "err", err) + return "", err } + return string(b), nil } -func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) { - if from == 0 && to == 0 { - from = s.sessionAt - to = s.sessionAt + testExternalServerBatchSize - } - if to-from > testExternalServerBatchSize { - to = from + testExternalServerBatchSize - 1 - } - if from >= s.maxKeys && to > s.maxKeys { - return nil, 0, 0, nil, io.EOF - } - if to > s.maxKeys { - to = s.maxKeys +//create a local store for the given node +func createTestLocalStorageForID(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, string, error) { + var datadir string + var err error + datadir, err = ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString())) + if err != nil { + return nil, "", err } - b := make([]byte, HashSize*(to-from+1)) - for i := from; i <= to; i++ { - s.keyFunc(b[(i-from)*HashSize:(i-from+1)*HashSize], i) + var store storage.ChunkStore + params := storage.NewDefaultLocalStoreParams() + params.ChunkDbPath = datadir + params.BaseKey = addr.Over() + store, err = storage.NewTestLocalStoreForAddr(params) + if err != nil { + os.RemoveAll(datadir) + return nil, "", err } - return b, from, to, nil, nil -} - -func (s *testExternalServer) GetData([]byte) ([]byte, error) { - return make([]byte, 4096), nil -} - -func (s *testExternalServer) Close() {} - -// Sets the global value defaultSkipCheck. 
-// It should be used in test function defer to reset the global value -// to the original value. -// -// defer setDefaultSkipCheck(defaultSkipCheck) -// defaultSkipCheck = skipCheck -// -// This works as defer function arguments evaluations are evaluated as ususal, -// but only the function body invocation is deferred. -func setDefaultSkipCheck(skipCheck bool) { - defaultSkipCheck = skipCheck + return store, datadir, nil } diff --git a/swarm/network/stream/delivery.go b/swarm/network/stream/delivery.go index 75aabad6c3b1..36040339d3d8 100644 --- a/swarm/network/stream/delivery.go +++ b/swarm/network/stream/delivery.go @@ -17,15 +17,19 @@ package stream import ( + "context" "errors" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p/discover" + cp "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/network" + "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/storage" + opentracing "github.com/opentracing/opentracing-go" ) const ( @@ -118,8 +122,8 @@ func (s *SwarmChunkServer) Close() { } // GetData retrives chunk data from db store -func (s *SwarmChunkServer) GetData(key []byte) ([]byte, error) { - chunk, err := s.db.Get(storage.Address(key)) +func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, error) { + chunk, err := s.db.Get(ctx, storage.Address(key)) if err == storage.ErrFetching { <-chunk.ReqC } else if err != nil { @@ -134,25 +138,37 @@ type RetrieveRequestMsg struct { SkipCheck bool } -func (d *Delivery) handleRetrieveRequestMsg(sp *Peer, req *RetrieveRequestMsg) error { +func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *RetrieveRequestMsg) error { log.Trace("received request", "peer", sp.ID(), "hash", req.Addr) handleRetrieveRequestMsgCount.Inc(1) + var osp opentracing.Span + ctx, osp = spancontext.StartSpan( + ctx, + "retrieve.request") + defer osp.Finish() + s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", false)) if err != nil { return err } streamer := s.Server.(*SwarmChunkServer) - chunk, created := d.db.GetOrCreateRequest(req.Addr) + chunk, created := d.db.GetOrCreateRequest(ctx, req.Addr) if chunk.ReqC != nil { if created { - if err := d.RequestFromPeers(chunk.Addr[:], true, sp.ID()); err != nil { + if err := d.RequestFromPeers(ctx, chunk.Addr[:], true, sp.ID()); err != nil { log.Warn("unable to forward chunk request", "peer", sp.ID(), "key", chunk.Addr, "err", err) chunk.SetErrored(storage.ErrChunkForward) return nil } } go func() { + var osp opentracing.Span + ctx, osp = spancontext.StartSpan( + ctx, + "waiting.delivery") + defer osp.Finish() + t := time.NewTimer(10 * time.Minute) defer t.Stop() @@ -169,7 +185,7 @@ func (d *Delivery) handleRetrieveRequestMsg(sp *Peer, req *RetrieveRequestMsg) e chunk.SetErrored(nil) if req.SkipCheck { - err := sp.Deliver(chunk, s.priority) + err := sp.Deliver(ctx, chunk, s.priority) if err != nil { log.Warn("ERROR in handleRetrieveRequestMsg, DROPPING peer!", "err", err) sp.Drop(err) @@ -185,7 +201,7 @@ func (d *Delivery) handleRetrieveRequestMsg(sp *Peer, req *RetrieveRequestMsg) e if length := len(chunk.SData); length < 9 { log.Error("Chunk.SData to deliver is too short", "len(chunk.SData)", length, "address", chunk.Addr) } - return sp.Deliver(chunk, s.priority) + return sp.Deliver(ctx, chunk, s.priority) } 
streamer.deliveryC <- chunk.Addr[:] return nil @@ -197,7 +213,13 @@ type ChunkDeliveryMsg struct { peer *Peer // set in handleChunkDeliveryMsg } -func (d *Delivery) handleChunkDeliveryMsg(sp *Peer, req *ChunkDeliveryMsg) error { +func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error { + var osp opentracing.Span + ctx, osp = spancontext.StartSpan( + ctx, + "chunk.delivery") + defer osp.Finish() + req.peer = sp d.receiveC <- req return nil @@ -208,8 +230,13 @@ R: for req := range d.receiveC { processReceivedChunksCount.Inc(1) + if len(req.SData) > cp.DefaultSize+8 { + log.Warn("received chunk is bigger than expected", "len", len(req.SData)) + continue R + } + // this should be has locally - chunk, err := d.db.Get(req.Addr) + chunk, err := d.db.Get(context.TODO(), req.Addr) if err == nil { continue R } @@ -223,8 +250,9 @@ R: continue R default: } + chunk.SData = req.SData - d.db.Put(chunk) + d.db.Put(context.TODO(), chunk) go func(req *ChunkDeliveryMsg) { err := chunk.WaitToStore() @@ -236,10 +264,11 @@ R: } // RequestFromPeers sends a chunk retrieve request to -func (d *Delivery) RequestFromPeers(hash []byte, skipCheck bool, peersToSkip ...discover.NodeID) error { +func (d *Delivery) RequestFromPeers(ctx context.Context, hash []byte, skipCheck bool, peersToSkip ...discover.NodeID) error { var success bool var err error requestFromPeersCount.Inc(1) + d.overlay.EachConn(hash, 255, func(p network.OverlayConn, po int, nn bool) bool { spId := p.(network.Peer).ID() for _, p := range peersToSkip { @@ -253,8 +282,7 @@ func (d *Delivery) RequestFromPeers(hash []byte, skipCheck bool, peersToSkip ... log.Warn("Delivery.RequestFromPeers: peer not found", "id", spId) return true } - // TODO: skip light nodes that do not accept retrieve requests - err = sp.SendPriority(&RetrieveRequestMsg{ + err = sp.SendPriority(ctx, &RetrieveRequestMsg{ Addr: hash, SkipCheck: skipCheck, }, Top) diff --git a/swarm/network/stream/delivery_test.go b/swarm/network/stream/delivery_test.go index b03028c88816..972cc859a35b 100644 --- a/swarm/network/stream/delivery_test.go +++ b/swarm/network/stream/delivery_test.go @@ -22,18 +22,19 @@ import ( crand "crypto/rand" "fmt" "io" + "os" "sync" "testing" "time" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/simulations" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" p2ptest "github.com/ethereum/go-ethereum/p2p/testing" - "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/network" - streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" + "github.com/ethereum/go-ethereum/swarm/network/simulation" + "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" ) @@ -46,7 +47,7 @@ func TestStreamerRetrieveRequest(t *testing.T) { peerID := tester.IDs[0] - streamer.delivery.RequestFromPeers(hash0[:], true) + streamer.delivery.RequestFromPeers(context.TODO(), hash0[:], true) err = tester.TestExchanges(p2ptest.Exchange{ Label: "RetrieveRequestMsg", @@ -80,7 +81,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) { peer := streamer.getPeer(peerID) - peer.handleSubscribeMsg(&SubscribeMsg{ + 
peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{ Stream: NewStream(swarmChunkServerStreamName, "", false), History: nil, Priority: Top, @@ -131,7 +132,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) { stream := NewStream(swarmChunkServerStreamName, "", false) - peer.handleSubscribeMsg(&SubscribeMsg{ + peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{ Stream: stream, History: nil, Priority: Top, @@ -140,7 +141,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) { hash := storage.Address(hash0[:]) chunk := storage.NewChunk(hash, nil) chunk.SData = hash - localStore.Put(chunk) + localStore.Put(context.TODO(), chunk) chunk.WaitToStore() err = tester.TestExchanges(p2ptest.Exchange{ @@ -179,7 +180,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) { hash = storage.Address(hash1[:]) chunk = storage.NewChunk(hash, nil) chunk.SData = hash1[:] - localStore.Put(chunk) + localStore.Put(context.TODO(), chunk) chunk.WaitToStore() err = tester.TestExchanges(p2ptest.Exchange{ @@ -234,7 +235,7 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) { chunkKey := hash0[:] chunkData := hash1[:] - chunk, created := localStore.GetOrCreateRequest(chunkKey) + chunk, created := localStore.GetOrCreateRequest(context.TODO(), chunkKey) if !created { t.Fatal("chunk already exists") @@ -285,7 +286,7 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) { case <-chunk.ReqC: } - storedChunk, err := localStore.Get(chunkKey) + storedChunk, err := localStore.Get(context.TODO(), chunkKey) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -308,155 +309,164 @@ func TestDeliveryFromNodes(t *testing.T) { } func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) { - defaultSkipCheck = skipCheck - toAddr = network.NewAddrFromNodeID - createStoreFunc = createTestLocalStorageFromSim - conf := &streamTesting.RunConfig{ - Adapter: *adapter, - NodeCount: nodes, - ConnLevel: conns, - ToAddr: toAddr, - Services: services, - EnableMsgEvents: false, - } + sim := simulation.New(map[string]simulation.ServiceFunc{ + "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) { - sim, teardown, err := streamTesting.NewSimulation(conf) - var rpcSubscriptionsWg sync.WaitGroup - defer func() { - rpcSubscriptionsWg.Wait() - teardown() - }() - if err != nil { - t.Fatal(err.Error()) - } - stores = make(map[discover.NodeID]storage.ChunkStore) - for i, id := range sim.IDs { - stores[id] = sim.Stores[i] - } - registries = make(map[discover.NodeID]*TestRegistry) - deliveries = make(map[discover.NodeID]*Delivery) - peerCount = func(id discover.NodeID) int { - if sim.IDs[0] == id || sim.IDs[nodes-1] == id { - return 1 - } - return 2 - } + id := ctx.Config.ID + addr := network.NewAddrFromNodeID(id) + store, datadir, err := createTestLocalStorageForID(id, addr) + if err != nil { + return nil, nil, err + } + bucket.Store(bucketKeyStore, store) + cleanup = func() { + os.RemoveAll(datadir) + store.Close() + } + localStore := store.(*storage.LocalStore) + db := storage.NewDBAPI(localStore) + kad := network.NewKademlia(addr.Over(), network.NewKadParams()) + delivery := NewDelivery(kad, db) + + r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ + SkipCheck: skipCheck, + }) + bucket.Store(bucketKeyRegistry, r) - // here we distribute chunks of a random file into Stores of nodes 1 to nodes - rrFileStore := 
storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams()) - size := chunkCount * chunkSize - fileHash, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) - // wait until all chunks stored - wait() + retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error { + return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck) + } + netStore := storage.NewNetStore(localStore, retrieveFunc) + fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams()) + bucket.Store(bucketKeyFileStore, fileStore) + + return r, cleanup, nil + + }, + }) + defer sim.Close() + + log.Info("Adding nodes to simulation") + _, err := sim.AddNodesAndConnectChain(nodes) if err != nil { - t.Fatal(err.Error()) + t.Fatal(err) } - errc := make(chan error, 1) - waitPeerErrC = make(chan error) - quitC := make(chan struct{}) - defer close(quitC) - - action := func(ctx context.Context) error { - // each node Subscribes to each other's swarmChunkServerStreamName - // need to wait till an aynchronous process registers the peers in streamer.peers - // that is used by Subscribe - // using a global err channel to share betweem action and node service + + log.Info("Starting simulation") + ctx := context.Background() + result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error { + nodeIDs := sim.UpNodeIDs() + //determine the pivot node to be the first node of the simulation + sim.SetPivotNode(nodeIDs[0]) + //distribute chunks of a random file into Stores of nodes 1 to nodes + //we will do this by creating a file store with an underlying round-robin store: + //the file store will create a hash for the uploaded file, but every chunk will be + //distributed to different nodes via round-robin scheduling + log.Debug("Writing file to round-robin file store") + //to do this, we create an array for chunkstores (length minus one, the pivot node) + stores := make([]storage.ChunkStore, len(nodeIDs)-1) + //we then need to get all stores from the sim.... + lStores := sim.NodesItems(bucketKeyStore) i := 0 - for err := range waitPeerErrC { - if err != nil { - return fmt.Errorf("error waiting for peers: %s", err) + //...iterate the buckets... + for id, bucketVal := range lStores { + //...and remove the one which is the pivot node + if id == *sim.PivotNodeID() { + continue } + //the other ones are added to the array... 
+ stores[i] = bucketVal.(storage.ChunkStore) i++ - if i == nodes { - break - } + } + //...which then gets passed to the round-robin file store + roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams()) + //now we can actually upload a (random) file to the round-robin store + size := chunkCount * chunkSize + log.Debug("Storing data to file store") + fileHash, wait, err := roundRobinFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) + // wait until all chunks stored + if err != nil { + return err + } + err = wait(ctx) + if err != nil { + return err } - // each node subscribes to the upstream swarm chunk server stream - // which responds to chunk retrieve requests all but the last node in the chain does not - for j := 0; j < nodes-1; j++ { - id := sim.IDs[j] - err := sim.CallClient(id, func(client *rpc.Client) error { - doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC) - if err != nil { - return err - } - rpcSubscriptionsWg.Add(1) - go func() { - <-doneC - rpcSubscriptionsWg.Done() - }() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) - defer cancel() - sid := sim.IDs[j+1] - return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top) - }) + log.Debug("Waiting for kademlia") + if _, err := sim.WaitTillHealthy(ctx, 2); err != nil { + return err + } + + //each of the nodes (except pivot node) subscribes to the stream of the next node + for j, node := range nodeIDs[0 : nodes-1] { + sid := nodeIDs[j+1] + item, ok := sim.NodeItem(node, bucketKeyRegistry) + if !ok { + return fmt.Errorf("No registry") + } + registry := item.(*Registry) + err = registry.Subscribe(sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top) if err != nil { return err } } - // create a retriever FileStore for the pivot node - delivery := deliveries[sim.IDs[0]] - retrieveFunc := func(chunk *storage.Chunk) error { - return delivery.RequestFromPeers(chunk.Addr[:], skipCheck) - } - netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc) - fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams()) + //get the pivot node's filestore + item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore) + if !ok { + return fmt.Errorf("No filestore") + } + pivotFileStore := item.(*storage.FileStore) + log.Debug("Starting retrieval routine") go func() { // start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks // we must wait for the peer connections to have started before requesting - n, err := readAll(fileStore, fileHash) + n, err := readAll(pivotFileStore, fileHash) log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err) if err != nil { - errc <- fmt.Errorf("requesting chunks action error: %v", err) + t.Fatalf("requesting chunks action error: %v", err) } }() - return nil - } - check := func(ctx context.Context, id discover.NodeID) (bool, error) { - select { - case err := <-errc: - return false, err - case <-ctx.Done(): - return false, ctx.Err() - default: - } + + log.Debug("Watching for disconnections") + disconnections := sim.PeerEvents( + context.Background(), + sim.NodeIDs(), + simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop), + ) + + go func() { + for d := range disconnections { + if d.Error != nil { + log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer) + t.Fatal(d.Error) + } + } + }() + + //finally check 
that the pivot node gets all chunks via the root hash + log.Debug("Check retrieval") + success := true var total int64 - err := sim.CallClient(id, func(client *rpc.Client) error { - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - return client.CallContext(ctx, &total, "stream_readAll", common.BytesToHash(fileHash)) - }) + total, err = readAll(pivotFileStore, fileHash) + if err != nil { + return err + } log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err)) if err != nil || total != int64(size) { - return false, nil + success = false } - return true, nil - } - conf.Step = &simulations.Step{ - Action: action, - Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]), - // we are only testing the pivot node (net.Nodes[0]) - Expect: &simulations.Expectation{ - Nodes: sim.IDs[0:1], - Check: check, - }, - } - startedAt := time.Now() - timeout := 300 * time.Second - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - result, err := sim.Run(ctx, conf) - finishedAt := time.Now() - if err != nil { - t.Fatalf("Setting up simulation failed: %v", err) - } + if !success { + return fmt.Errorf("Test failed, chunks not available on all nodes") + } + log.Debug("Test terminated successfully") + return nil + }) if result.Error != nil { - t.Fatalf("Simulation failed: %s", result.Error) + t.Fatal(result.Error) } - streamTesting.CheckResult(t, result, startedAt, finishedAt) } func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) { @@ -486,214 +496,146 @@ func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) { } func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) { - defaultSkipCheck = skipCheck - toAddr = network.NewAddrFromNodeID - createStoreFunc = createTestLocalStorageFromSim - registries = make(map[discover.NodeID]*TestRegistry) - - timeout := 300 * time.Second - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - conf := &streamTesting.RunConfig{ - Adapter: *adapter, - NodeCount: nodes, - ConnLevel: conns, - ToAddr: toAddr, - Services: services, - EnableMsgEvents: false, - } - sim, teardown, err := streamTesting.NewSimulation(conf) - var rpcSubscriptionsWg sync.WaitGroup - defer func() { - rpcSubscriptionsWg.Wait() - teardown() - }() - if err != nil { - b.Fatal(err.Error()) - } - - stores = make(map[discover.NodeID]storage.ChunkStore) - deliveries = make(map[discover.NodeID]*Delivery) - for i, id := range sim.IDs { - stores[id] = sim.Stores[i] - } - peerCount = func(id discover.NodeID) int { - if sim.IDs[0] == id || sim.IDs[nodes-1] == id { - return 1 - } - return 2 - } - // wait channel for all nodes all peer connections to set up - waitPeerErrC = make(chan error) - - // create a FileStore for the last node in the chain which we are gonna write to - remoteFileStore := storage.NewFileStore(sim.Stores[nodes-1], storage.NewFileStoreParams()) - - // channel to signal simulation initialisation with action call complete - // or node disconnections - disconnectC := make(chan error) - quitC := make(chan struct{}) - - initC := make(chan error) + sim := simulation.New(map[string]simulation.ServiceFunc{ + "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) { - action := func(ctx context.Context) error { - // each node Subscribes to each other's swarmChunkServerStreamName - // need to wait till an aynchronous process registers the peers 
in streamer.peers - // that is used by Subscribe - // waitPeerErrC using a global err channel to share betweem action and node service - i := 0 - for err := range waitPeerErrC { + id := ctx.Config.ID + addr := network.NewAddrFromNodeID(id) + store, datadir, err := createTestLocalStorageForID(id, addr) if err != nil { - return fmt.Errorf("error waiting for peers: %s", err) + return nil, nil, err } - i++ - if i == nodes { - break + bucket.Store(bucketKeyStore, store) + cleanup = func() { + os.RemoveAll(datadir) + store.Close() } - } - var err error - // each node except the last one subscribes to the upstream swarm chunk server stream - // which responds to chunk retrieve requests - for j := 0; j < nodes-1; j++ { - id := sim.IDs[j] - err = sim.CallClient(id, func(client *rpc.Client) error { - doneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC) - if err != nil { - return err - } - rpcSubscriptionsWg.Add(1) - go func() { - <-doneC - rpcSubscriptionsWg.Done() - }() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) - defer cancel() - sid := sim.IDs[j+1] // the upstream peer's id - return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top) + localStore := store.(*storage.LocalStore) + db := storage.NewDBAPI(localStore) + kad := network.NewKademlia(addr.Over(), network.NewKadParams()) + delivery := NewDelivery(kad, db) + + r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ + SkipCheck: skipCheck, + DoSync: true, + SyncUpdateDelay: 0, }) - if err != nil { - break + + retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error { + return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck) } - } - initC <- err - return nil - } + netStore := storage.NewNetStore(localStore, retrieveFunc) + fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams()) + bucket.Store(bucketKeyFileStore, fileStore) - // the check function is only triggered when the benchmark finishes - trigger := make(chan discover.NodeID) - check := func(ctx context.Context, id discover.NodeID) (_ bool, err error) { - return true, nil - } + return r, cleanup, nil - conf.Step = &simulations.Step{ - Action: action, - Trigger: trigger, - // we are only testing the pivot node (net.Nodes[0]) - Expect: &simulations.Expectation{ - Nodes: sim.IDs[0:1], - Check: check, }, - } - - // run the simulation in the background - errc := make(chan error) - go func() { - _, err := sim.Run(ctx, conf) - close(quitC) - errc <- err - }() + }) + defer sim.Close() - // wait for simulation action to complete stream subscriptions - err = <-initC + log.Info("Initializing test config") + _, err := sim.AddNodesAndConnectChain(nodes) if err != nil { - b.Fatalf("simulation failed to initialise. expected no error. 
got %v", err) + b.Fatal(err) } - // create a retriever FileStore for the pivot node - // by now deliveries are set for each node by the streamer service - delivery := deliveries[sim.IDs[0]] - retrieveFunc := func(chunk *storage.Chunk) error { - return delivery.RequestFromPeers(chunk.Addr[:], skipCheck) - } - netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc) - - // benchmark loop - b.ResetTimer() - b.StopTimer() -Loop: - for i := 0; i < b.N; i++ { - // uploading chunkCount random chunks to the last node - hashes := make([]storage.Address, chunkCount) - for i := 0; i < chunkCount; i++ { - // create actual size real chunks - hash, wait, err := remoteFileStore.Store(io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false) - // wait until all chunks stored - wait() - if err != nil { - b.Fatalf("expected no error. got %v", err) - } - // collect the hashes - hashes[i] = hash + ctx := context.Background() + result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error { + nodeIDs := sim.UpNodeIDs() + node := nodeIDs[len(nodeIDs)-1] + + item, ok := sim.NodeItem(node, bucketKeyFileStore) + if !ok { + b.Fatal("No filestore") } - // now benchmark the actual retrieval - // netstore.Get is called for each hash in a go routine and errors are collected - b.StartTimer() - errs := make(chan error) - for _, hash := range hashes { - go func(h storage.Address) { - _, err := netStore.Get(h) - log.Warn("test check netstore get", "hash", h, "err", err) - errs <- err - }(hash) + remoteFileStore := item.(*storage.FileStore) + + pivotNode := nodeIDs[0] + item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore) + if !ok { + b.Fatal("No filestore") } - // count and report retrieval errors - // if there are misses then chunk timeout is too low for the distance and volume (?) - var total, misses int - for err := range errs { - if err != nil { - log.Warn(err.Error()) - misses++ - } - total++ - if total == chunkCount { - break - } + netStore := item.(*storage.NetStore) + + if _, err := sim.WaitTillHealthy(ctx, 2); err != nil { + return err } + + disconnections := sim.PeerEvents( + context.Background(), + sim.NodeIDs(), + simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop), + ) + + go func() { + for d := range disconnections { + if d.Error != nil { + log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer) + b.Fatal(d.Error) + } + } + }() + // benchmark loop + b.ResetTimer() b.StopTimer() + Loop: + for i := 0; i < b.N; i++ { + // uploading chunkCount random chunks to the last node + hashes := make([]storage.Address, chunkCount) + for i := 0; i < chunkCount; i++ { + // create actual size real chunks + ctx := context.TODO() + hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false) + if err != nil { + b.Fatalf("expected no error. got %v", err) + } + // wait until all chunks stored + err = wait(ctx) + if err != nil { + b.Fatalf("expected no error. got %v", err) + } + // collect the hashes + hashes[i] = hash + } + // now benchmark the actual retrieval + // netstore.Get is called for each hash in a go routine and errors are collected + b.StartTimer() + errs := make(chan error) + for _, hash := range hashes { + go func(h storage.Address) { + _, err := netStore.Get(ctx, h) + log.Warn("test check netstore get", "hash", h, "err", err) + errs <- err + }(hash) + } + // count and report retrieval errors + // if there are misses then chunk timeout is too low for the distance and volume (?) 
+ var total, misses int + for err := range errs { + if err != nil { + log.Warn(err.Error()) + misses++ + } + total++ + if total == chunkCount { + break + } + } + b.StopTimer() - select { - case err = <-disconnectC: - if err != nil { + if misses > 0 { + err = fmt.Errorf("%v chunk not found out of %v", misses, total) break Loop } - default: - } - - if misses > 0 { - err = fmt.Errorf("%v chunk not found out of %v", misses, total) - break Loop } - } - - select { - case <-quitC: - case trigger <- sim.IDs[0]: - } - if err == nil { - err = <-errc - } else { - if e := <-errc; e != nil { - b.Errorf("sim.Run function error: %v", e) + if err != nil { + b.Fatal(err) } + return nil + }) + if result.Error != nil { + b.Fatal(result.Error) } - // benchmark over, trigger the check function to conclude the simulation - if err != nil { - b.Fatalf("expected no error. got %v", err) - } -} - -func createTestLocalStorageFromSim(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) { - return stores[id], nil } diff --git a/swarm/network/stream/intervals_test.go b/swarm/network/stream/intervals_test.go index 4e2721cb0faa..f4294134b9a1 100644 --- a/swarm/network/stream/intervals_test.go +++ b/swarm/network/stream/intervals_test.go @@ -22,52 +22,22 @@ import ( "encoding/binary" "fmt" "io" + "os" "sync" "testing" "time" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/simulations" "github.com/ethereum/go-ethereum/p2p/simulations/adapters" - "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/swarm/network" - streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" + "github.com/ethereum/go-ethereum/swarm/network/simulation" "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" ) -var ( - externalStreamName = "externalStream" - externalStreamSessionAt uint64 = 50 - externalStreamMaxKeys uint64 = 100 -) - -func newIntervalsStreamerService(ctx *adapters.ServiceContext) (node.Service, error) { - id := ctx.Config.ID - addr := toAddr(id) - kad := network.NewKademlia(addr.Over(), network.NewKadParams()) - store := stores[id].(*storage.LocalStore) - db := storage.NewDBAPI(store) - delivery := NewDelivery(kad, db) - deliveries[id] = delivery - r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ - SkipCheck: defaultSkipCheck, - }) - - r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) { - return newTestExternalClient(db), nil - }) - r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) { - return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil - }) - - go func() { - waitPeerErrC <- waitForPeers(r, 1*time.Second, peerCount(id)) - }() - return &TestExternalRegistry{r}, nil -} - func TestIntervals(t *testing.T) { testIntervals(t, true, nil, false) testIntervals(t, false, NewRange(9, 26), false) @@ -81,233 +51,337 @@ func TestIntervals(t *testing.T) { func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) { nodes := 2 chunkCount := dataChunkCount + externalStreamName := "externalStream" + externalStreamSessionAt := uint64(50) + externalStreamMaxKeys := uint64(100) - defer setDefaultSkipCheck(defaultSkipCheck) - defaultSkipCheck 
= skipCheck - - toAddr = network.NewAddrFromNodeID - conf := &streamTesting.RunConfig{ - Adapter: *adapter, - NodeCount: nodes, - ConnLevel: 1, - ToAddr: toAddr, - Services: services, - DefaultService: "intervalsStreamer", - } + sim := simulation.New(map[string]simulation.ServiceFunc{ + "intervalsStreamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) { - sim, teardown, err := streamTesting.NewSimulation(conf) - var rpcSubscriptionsWg sync.WaitGroup - defer func() { - rpcSubscriptionsWg.Wait() - teardown() - }() - if err != nil { - t.Fatal(err) - } + id := ctx.Config.ID + addr := network.NewAddrFromNodeID(id) + store, datadir, err := createTestLocalStorageForID(id, addr) + if err != nil { + return nil, nil, err + } + bucket.Store(bucketKeyStore, store) + cleanup = func() { + store.Close() + os.RemoveAll(datadir) + } + localStore := store.(*storage.LocalStore) + db := storage.NewDBAPI(localStore) + kad := network.NewKademlia(addr.Over(), network.NewKadParams()) + delivery := NewDelivery(kad, db) - stores = make(map[discover.NodeID]storage.ChunkStore) - deliveries = make(map[discover.NodeID]*Delivery) - for i, id := range sim.IDs { - stores[id] = sim.Stores[i] - } + r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ + SkipCheck: skipCheck, + }) + bucket.Store(bucketKeyRegistry, r) - peerCount = func(id discover.NodeID) int { - return 1 - } + r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) { + return newTestExternalClient(db), nil + }) + r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) { + return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil + }) + + fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams()) + bucket.Store(bucketKeyFileStore, fileStore) + + return r, cleanup, nil + + }, + }) + defer sim.Close() - fileStore := storage.NewFileStore(sim.Stores[0], storage.NewFileStoreParams()) - size := chunkCount * chunkSize - _, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) - wait() + log.Info("Adding nodes to simulation") + _, err := sim.AddNodesAndConnectChain(nodes) if err != nil { t.Fatal(err) } - errc := make(chan error, 1) - waitPeerErrC = make(chan error) - quitC := make(chan struct{}) - defer close(quitC) + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second) + defer cancel() - action := func(ctx context.Context) error { - i := 0 - for err := range waitPeerErrC { - if err != nil { - return fmt.Errorf("error waiting for peers: %s", err) - } - i++ - if i == nodes { - break - } + result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error { + nodeIDs := sim.UpNodeIDs() + storer := nodeIDs[0] + checker := nodeIDs[1] + + item, ok := sim.NodeItem(storer, bucketKeyFileStore) + if !ok { + return fmt.Errorf("No filestore") + } + fileStore := item.(*storage.FileStore) + + size := chunkCount * chunkSize + _, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) + if err != nil { + log.Error("Store error: %v", "err", err) + t.Fatal(err) + } + err = wait(ctx) + if err != nil { + log.Error("Wait error: %v", "err", err) + t.Fatal(err) } - id := sim.IDs[1] + item, ok = sim.NodeItem(checker, bucketKeyRegistry) + if !ok { + return fmt.Errorf("No registry") + } + registry := item.(*Registry) - err := sim.CallClient(id, func(client *rpc.Client) error { + 
liveErrC := make(chan error) + historyErrC := make(chan error) - sid := sim.IDs[0] + if _, err := sim.WaitTillHealthy(ctx, 2); err != nil { + log.Error("WaitKademlia error: %v", "err", err) + return err + } - doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC) - if err != nil { - return err + log.Debug("Watching for disconnections") + disconnections := sim.PeerEvents( + context.Background(), + sim.NodeIDs(), + simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop), + ) + + go func() { + for d := range disconnections { + if d.Error != nil { + log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer) + t.Fatal(d.Error) + } + } + }() + + go func() { + if !live { + close(liveErrC) + return } - rpcSubscriptionsWg.Add(1) - go func() { - <-doneC - rpcSubscriptionsWg.Done() + + var err error + defer func() { + liveErrC <- err }() - ctx, cancel := context.WithTimeout(ctx, 100*time.Second) - defer cancel() - err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(externalStreamName, "", live), history, Top) + // live stream + var liveHashesChan chan []byte + liveHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", true)) if err != nil { - return err + log.Error("Subscription error: %v", "err", err) + return } + i := externalStreamSessionAt - liveErrC := make(chan error) - historyErrC := make(chan error) + // we have subscribed, enable notifications + err = enableNotifications(registry, storer, NewStream(externalStreamName, "", true)) + if err != nil { + return + } - go func() { - if !live { - close(liveErrC) + for { + select { + case hash := <-liveHashesChan: + h := binary.BigEndian.Uint64(hash) + if h != i { + err = fmt.Errorf("expected live hash %d, got %d", i, h) + return + } + i++ + if i > externalStreamMaxKeys { + return + } + case <-ctx.Done(): return } + } + }() - var err error - defer func() { - liveErrC <- err - }() + go func() { + if live && history == nil { + close(historyErrC) + return + } - // live stream - liveHashesChan := make(chan []byte) - liveSubscription, err := client.Subscribe(ctx, "stream", liveHashesChan, "getHashes", sid, NewStream(externalStreamName, "", true)) - if err != nil { - return - } - defer liveSubscription.Unsubscribe() + var err error + defer func() { + historyErrC <- err + }() - i := externalStreamSessionAt + // history stream + var historyHashesChan chan []byte + historyHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", false)) + if err != nil { + return + } - // we have subscribed, enable notifications - err = client.CallContext(ctx, nil, "stream_enableNotifications", sid, NewStream(externalStreamName, "", true)) - if err != nil { - return + var i uint64 + historyTo := externalStreamMaxKeys + if history != nil { + i = history.From + if history.To != 0 { + historyTo = history.To } + } - for { - select { - case hash := <-liveHashesChan: - h := binary.BigEndian.Uint64(hash) - if h != i { - err = fmt.Errorf("expected live hash %d, got %d", i, h) - return - } - i++ - if i > externalStreamMaxKeys { - return - } - case err = <-liveSubscription.Err(): + // we have subscribed, enable notifications + err = enableNotifications(registry, storer, NewStream(externalStreamName, "", false)) + if err != nil { + return + } + + for { + select { + case hash := <-historyHashesChan: + h := binary.BigEndian.Uint64(hash) + if h != i { + err = fmt.Errorf("expected history hash %d, got %d", i, h) return - case <-ctx.Done(): + } + i++ + if i > historyTo { return } - } - 
}() - - go func() { - if live && history == nil { - close(historyErrC) + case <-ctx.Done(): return } + } + }() - var err error - defer func() { - historyErrC <- err - }() + err = registry.Subscribe(storer, NewStream(externalStreamName, "", live), history, Top) + if err != nil { + return err + } + if err := <-liveErrC; err != nil { + return err + } + if err := <-historyErrC; err != nil { + return err + } - // history stream - historyHashesChan := make(chan []byte) - historySubscription, err := client.Subscribe(ctx, "stream", historyHashesChan, "getHashes", sid, NewStream(externalStreamName, "", false)) - if err != nil { - return - } - defer historySubscription.Unsubscribe() - - var i uint64 - historyTo := externalStreamMaxKeys - if history != nil { - i = history.From - if history.To != 0 { - historyTo = history.To - } - } + return nil + }) - // we have subscribed, enable notifications - err = client.CallContext(ctx, nil, "stream_enableNotifications", sid, NewStream(externalStreamName, "", false)) - if err != nil { - return - } + if result.Error != nil { + t.Fatal(result.Error) + } +} - for { - select { - case hash := <-historyHashesChan: - h := binary.BigEndian.Uint64(hash) - if h != i { - err = fmt.Errorf("expected history hash %d, got %d", i, h) - return - } - i++ - if i > historyTo { - return - } - case err = <-historySubscription.Err(): - return - case <-ctx.Done(): - return - } - } - }() +func getHashes(ctx context.Context, r *Registry, peerID discover.NodeID, s Stream) (chan []byte, error) { + peer := r.getPeer(peerID) - if err := <-liveErrC; err != nil { - return err - } - if err := <-historyErrC; err != nil { - return err - } + client, err := peer.getClient(ctx, s) + if err != nil { + return nil, err + } - return nil - }) + c := client.Client.(*testExternalClient) + + return c.hashes, nil +} + +func enableNotifications(r *Registry, peerID discover.NodeID, s Stream) error { + peer := r.getPeer(peerID) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, err := peer.getClient(ctx, s) + if err != nil { return err } - check := func(ctx context.Context, id discover.NodeID) (bool, error) { - select { - case err := <-errc: - return false, err - case <-ctx.Done(): - return false, ctx.Err() - default: + + close(client.Client.(*testExternalClient).enableNotificationsC) + + return nil +} + +type testExternalClient struct { + hashes chan []byte + db *storage.DBAPI + enableNotificationsC chan struct{} +} + +func newTestExternalClient(db *storage.DBAPI) *testExternalClient { + return &testExternalClient{ + hashes: make(chan []byte), + db: db, + enableNotificationsC: make(chan struct{}), + } +} + +func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func() { + chunk, _ := c.db.GetOrCreateRequest(ctx, hash) + if chunk.ReqC == nil { + return nil + } + c.hashes <- hash + //NOTE: This was failing on go1.9.x with a deadlock. 
+ //Sometimes this function would just block + //It is commented now, but it may be well worth after the chunk refactor + //to re-enable this and see if the problem has been addressed + /* + return func() { + return chunk.WaitToStore() } - return true, nil + */ + return nil +} + +func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) { + return nil +} + +func (c *testExternalClient) Close() {} + +const testExternalServerBatchSize = 10 + +type testExternalServer struct { + t string + keyFunc func(key []byte, index uint64) + sessionAt uint64 + maxKeys uint64 +} + +func newTestExternalServer(t string, sessionAt, maxKeys uint64, keyFunc func(key []byte, index uint64)) *testExternalServer { + if keyFunc == nil { + keyFunc = binary.BigEndian.PutUint64 + } + return &testExternalServer{ + t: t, + keyFunc: keyFunc, + sessionAt: sessionAt, + maxKeys: maxKeys, } +} - conf.Step = &simulations.Step{ - Action: action, - Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]), - Expect: &simulations.Expectation{ - Nodes: sim.IDs[1:1], - Check: check, - }, +func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) { + if from == 0 && to == 0 { + from = s.sessionAt + to = s.sessionAt + testExternalServerBatchSize } - startedAt := time.Now() - timeout := 300 * time.Second - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - result, err := sim.Run(ctx, conf) - finishedAt := time.Now() - if err != nil { - t.Fatalf("Setting up simulation failed: %v", err) + if to-from > testExternalServerBatchSize { + to = from + testExternalServerBatchSize - 1 } - if result.Error != nil { - t.Fatalf("Simulation failed: %s", result.Error) + if from >= s.maxKeys && to > s.maxKeys { + return nil, 0, 0, nil, io.EOF + } + if to > s.maxKeys { + to = s.maxKeys + } + b := make([]byte, HashSize*(to-from+1)) + for i := from; i <= to; i++ { + s.keyFunc(b[(i-from)*HashSize:(i-from+1)*HashSize], i) } - streamTesting.CheckResult(t, result, startedAt, finishedAt) + return b, from, to, nil, nil } + +func (s *testExternalServer) GetData(context.Context, []byte) ([]byte, error) { + return make([]byte, 4096), nil +} + +func (s *testExternalServer) Close() {} diff --git a/swarm/network/stream/messages.go b/swarm/network/stream/messages.go index 5668a73e9df8..a19f63589253 100644 --- a/swarm/network/stream/messages.go +++ b/swarm/network/stream/messages.go @@ -17,6 +17,7 @@ package stream import ( + "context" "errors" "fmt" "sync" @@ -25,7 +26,9 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/swarm/log" bv "github.com/ethereum/go-ethereum/swarm/network/bitvector" + "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/storage" + opentracing "github.com/opentracing/opentracing-go" ) // Stream defines a unique stream identifier. 
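Before continuing with the handler changes in messages.go, it helps to step back and note the lifecycle that the rewritten tests above all share: construct a Simulation with New and a map of ServiceFuncs, add and connect nodes, execute the test body through Run (which returns a Result carrying Duration and Error), and release everything with Close. The sketch below is illustrative only and is not part of this patch; the package, file and test names are hypothetical, while the simulation API calls and the noop service mirror the tests above.

package lifecycle_test // hypothetical illustration, not part of this patch

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
)

// noopService implements node.Service but does nothing, mirroring the helper
// defined in simulation_test.go above.
type noopService struct{}

func (s *noopService) Protocols() []p2p.Protocol      { return []p2p.Protocol{} }
func (s *noopService) APIs() []rpc.API                { return []rpc.API{} }
func (s *noopService) Start(server *p2p.Server) error { return nil }
func (s *noopService) Stop() error                    { return nil }

// TestLifecycle sketches the New -> AddNodesAndConnectChain -> Run -> Close
// flow that the rewritten stream tests share.
func TestLifecycle(t *testing.T) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"noop": func(ctx *adapters.ServiceContext, bucket *sync.Map) (node.Service, func(), error) {
			// Real tests build their service here, stash per-node objects in
			// bucket for later sim.NodeItem lookups, and return a cleanup func.
			return &noopService{}, nil, nil
		},
	})
	defer sim.Close() // runs cleanup funcs, waits for them, shuts the network down

	if _, err := sim.AddNodesAndConnectChain(3); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		// Test logic goes here, e.g. inspecting sim.UpNodeIDs() or waiting for
		// kademlia health with sim.WaitTillHealthy(ctx, 2) as the tests above do.
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
}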
@@ -71,17 +74,17 @@ type RequestSubscriptionMsg struct { Priority uint8 // delivered on priority channel } -func (p *Peer) handleRequestSubscription(req *RequestSubscriptionMsg) (err error) { +func (p *Peer) handleRequestSubscription(ctx context.Context, req *RequestSubscriptionMsg) (err error) { log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr.ID(), p.ID(), req.Stream)) return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority) } -func (p *Peer) handleSubscribeMsg(req *SubscribeMsg) (err error) { +func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err error) { metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1) defer func() { if err != nil { - if e := p.Send(SubscribeErrorMsg{ + if e := p.Send(context.TODO(), SubscribeErrorMsg{ Error: err.Error(), }); e != nil { log.Error("send stream subscribe error message", "err", err) @@ -181,9 +184,15 @@ func (m OfferedHashesMsg) String() string { // handleOfferedHashesMsg protocol msg handler calls the incoming streamer interface // Filter method -func (p *Peer) handleOfferedHashesMsg(req *OfferedHashesMsg) error { +func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error { metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1) + var sp opentracing.Span + ctx, sp = spancontext.StartSpan( + ctx, + "handle.offered.hashes") + defer sp.Finish() + c, _, err := p.getOrSetClient(req.Stream, req.From, req.To) if err != nil { return err @@ -197,7 +206,7 @@ func (p *Peer) handleOfferedHashesMsg(req *OfferedHashesMsg) error { for i := 0; i < len(hashes); i += HashSize { hash := hashes[i : i+HashSize] - if wait := c.NeedData(hash); wait != nil { + if wait := c.NeedData(ctx, hash); wait != nil { want.Set(i/HashSize, true) wg.Add(1) // create request and wait until the chunk data arrives and is stored @@ -260,7 +269,7 @@ func (p *Peer) handleOfferedHashesMsg(req *OfferedHashesMsg) error { return } log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To) - err := p.SendPriority(msg, c.priority) + err := p.SendPriority(ctx, msg, c.priority) if err != nil { log.Warn("SendPriority err, so dropping peer", "err", err) p.Drop(err) @@ -285,7 +294,7 @@ func (m WantedHashesMsg) String() string { // handleWantedHashesMsg protocol msg handler // * sends the next batch of unsynced keys // * sends the actual data chunks as per WantedHashesMsg -func (p *Peer) handleWantedHashesMsg(req *WantedHashesMsg) error { +func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) error { metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1) log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To) @@ -314,7 +323,7 @@ func (p *Peer) handleWantedHashesMsg(req *WantedHashesMsg) error { metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1) hash := hashes[i*HashSize : (i+1)*HashSize] - data, err := s.GetData(hash) + data, err := s.GetData(ctx, hash) if err != nil { return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err) } @@ -323,7 +332,7 @@ func (p *Peer) handleWantedHashesMsg(req *WantedHashesMsg) error { if length := len(chunk.SData); length < 9 { log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr) } - if err := p.Deliver(chunk, s.priority); err != nil { + if err := p.Deliver(ctx, chunk, s.priority); err != nil { 
return err } } @@ -363,7 +372,7 @@ func (m TakeoverProofMsg) String() string { return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig) } -func (p *Peer) handleTakeoverProofMsg(req *TakeoverProofMsg) error { +func (p *Peer) handleTakeoverProofMsg(ctx context.Context, req *TakeoverProofMsg) error { _, err := p.getServer(req.Stream) // store the strongest takeoverproof for the stream in streamer return err diff --git a/swarm/network/stream/peer.go b/swarm/network/stream/peer.go index 29984a9115f5..80b9ab711a6e 100644 --- a/swarm/network/stream/peer.go +++ b/swarm/network/stream/peer.go @@ -27,8 +27,10 @@ import ( "github.com/ethereum/go-ethereum/swarm/log" pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue" "github.com/ethereum/go-ethereum/swarm/network/stream/intervals" + "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" + opentracing "github.com/opentracing/opentracing-go" ) var sendTimeout = 30 * time.Second @@ -62,6 +64,11 @@ type Peer struct { quit chan struct{} } +type WrappedPriorityMsg struct { + Context context.Context + Msg interface{} +} + // NewPeer is the constructor for Peer func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer { p := &Peer{ @@ -74,7 +81,10 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer { quit: make(chan struct{}), } ctx, cancel := context.WithCancel(context.Background()) - go p.pq.Run(ctx, func(i interface{}) { p.Send(i) }) + go p.pq.Run(ctx, func(i interface{}) { + wmsg := i.(WrappedPriorityMsg) + p.Send(wmsg.Context, wmsg.Msg) + }) go func() { <-p.quit cancel() @@ -83,25 +93,41 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer { } // Deliver sends a storeRequestMsg protocol message to the peer -func (p *Peer) Deliver(chunk *storage.Chunk, priority uint8) error { +func (p *Peer) Deliver(ctx context.Context, chunk *storage.Chunk, priority uint8) error { + var sp opentracing.Span + ctx, sp = spancontext.StartSpan( + ctx, + "send.chunk.delivery") + defer sp.Finish() + msg := &ChunkDeliveryMsg{ Addr: chunk.Addr, SData: chunk.SData, } - return p.SendPriority(msg, priority) + return p.SendPriority(ctx, msg, priority) } // SendPriority sends message to the peer using the outgoing priority queue -func (p *Peer) SendPriority(msg interface{}, priority uint8) error { +func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8) error { defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now()) metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1) - ctx, cancel := context.WithTimeout(context.Background(), sendTimeout) + cctx, cancel := context.WithTimeout(context.Background(), sendTimeout) defer cancel() - return p.pq.Push(ctx, msg, int(priority)) + wmsg := WrappedPriorityMsg{ + Context: ctx, + Msg: msg, + } + return p.pq.Push(cctx, wmsg, int(priority)) } // SendOfferedHashes sends OfferedHashesMsg protocol msg func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error { + var sp opentracing.Span + ctx, sp := spancontext.StartSpan( + context.TODO(), + "send.offered.hashes") + defer sp.Finish() + hashes, from, to, proof, err := s.SetNextBatch(f, t) if err != nil { return err @@ -124,7 +150,7 @@ func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error { Stream: s.stream, } log.Trace("Swarm syncer offer 
batch", "peer", p.ID(), "stream", s.stream, "len", len(hashes), "from", from, "to", to) - return p.SendPriority(msg, s.priority) + return p.SendPriority(ctx, msg, s.priority) } func (p *Peer) getServer(s Stream) (*server, error) { diff --git a/swarm/network/stream/snapshot_retrieval_test.go b/swarm/network/stream/snapshot_retrieval_test.go index 59c776c3027c..4ff947b21555 100644 --- a/swarm/network/stream/snapshot_retrieval_test.go +++ b/swarm/network/stream/snapshot_retrieval_test.go @@ -17,20 +17,19 @@ package stream import ( "context" - crand "crypto/rand" "fmt" - "math/rand" - "strings" + "os" "sync" "testing" "time" - "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/simulations" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/network" - streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" + "github.com/ethereum/go-ethereum/swarm/network/simulation" + "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" ) @@ -40,40 +39,6 @@ const ( maxFileSize = 40 ) -func initRetrievalTest() { - //global func to get overlay address from discover ID - toAddr = func(id discover.NodeID) *network.BzzAddr { - addr := network.NewAddrFromNodeID(id) - return addr - } - //global func to create local store - createStoreFunc = createTestLocalStorageForId - //local stores - stores = make(map[discover.NodeID]storage.ChunkStore) - //data directories for each node and store - datadirs = make(map[discover.NodeID]string) - //deliveries for each node - deliveries = make(map[discover.NodeID]*Delivery) - //global retrieve func - getRetrieveFunc = func(id discover.NodeID) func(chunk *storage.Chunk) error { - return func(chunk *storage.Chunk) error { - skipCheck := true - return deliveries[id].RequestFromPeers(chunk.Addr[:], skipCheck) - } - } - //registries, map of discover.NodeID to its streamer - registries = make(map[discover.NodeID]*TestRegistry) - //not needed for this test but required from common_test for NewStreamService - waitPeerErrC = make(chan error) - //also not needed for this test but required for NewStreamService - peerCount = func(id discover.NodeID) int { - if ids[0] == id || ids[len(ids)-1] == id { - return 1 - } - return 2 - } -} - //This test is a retrieval test for nodes. //A configurable number of nodes can be //provided to the test. @@ -81,7 +46,10 @@ func initRetrievalTest() { //Number of nodes can be provided via commandline too. 
func TestFileRetrieval(t *testing.T) { if *nodes != 0 { - fileRetrievalTest(t, *nodes) + err := runFileRetrievalTest(*nodes) + if err != nil { + t.Fatal(err) + } } else { nodeCnt := []int{16} //if the `longrunning` flag has been provided @@ -90,7 +58,10 @@ func TestFileRetrieval(t *testing.T) { nodeCnt = append(nodeCnt, 32, 64, 128) } for _, n := range nodeCnt { - fileRetrievalTest(t, n) + err := runFileRetrievalTest(n) + if err != nil { + t.Fatal(err) + } } } } @@ -105,7 +76,10 @@ func TestRetrieval(t *testing.T) { //if nodes/chunks have been provided via commandline, //run the tests with these values if *nodes != 0 && *chunks != 0 { - retrievalTest(t, *chunks, *nodes) + err := runRetrievalTest(*chunks, *nodes) + if err != nil { + t.Fatal(err) + } } else { var nodeCnt []int var chnkCnt []int @@ -121,76 +95,17 @@ func TestRetrieval(t *testing.T) { } for _, n := range nodeCnt { for _, c := range chnkCnt { - retrievalTest(t, c, n) + err := runRetrievalTest(c, n) + if err != nil { + t.Fatal(err) + } } } } } -//Every test runs 3 times, a live, a history, and a live AND history -func fileRetrievalTest(t *testing.T, nodeCount int) { - //test live and NO history - log.Info("Testing live and no history", "nodeCount", nodeCount) - live = true - history = false - err := runFileRetrievalTest(nodeCount) - if err != nil { - t.Fatal(err) - } - //test history only - log.Info("Testing history only", "nodeCount", nodeCount) - live = false - history = true - err = runFileRetrievalTest(nodeCount) - if err != nil { - t.Fatal(err) - } - //finally test live and history - log.Info("Testing live and history", "nodeCount", nodeCount) - live = true - err = runFileRetrievalTest(nodeCount) - if err != nil { - t.Fatal(err) - } -} - -//Every test runs 3 times, a live, a history, and a live AND history -func retrievalTest(t *testing.T, chunkCount int, nodeCount int) { - //test live and NO history - log.Info("Testing live and no history", "chunkCount", chunkCount, "nodeCount", nodeCount) - live = true - history = false - err := runRetrievalTest(chunkCount, nodeCount) - if err != nil { - t.Fatal(err) - } - //test history only - log.Info("Testing history only", "chunkCount", chunkCount, "nodeCount", nodeCount) - live = false - history = true - err = runRetrievalTest(chunkCount, nodeCount) - if err != nil { - t.Fatal(err) - } - //finally test live and history - log.Info("Testing live and history", "chunkCount", chunkCount, "nodeCount", nodeCount) - live = true - err = runRetrievalTest(chunkCount, nodeCount) - if err != nil { - t.Fatal(err) - } -} - /* -The upload is done by dependency to the global -`live` and `history` variables; - -If `live` is set, first stream subscriptions are established, -then files are uploaded to nodes. - -If `history` is enabled, first upload files, then build up subscriptions. - The test loads a snapshot file to construct the swarm network, assuming that the snapshot file identifies a healthy kademlia network. Nevertheless a health check runs in the @@ -199,261 +114,129 @@ simulation's `action` function. The snapshot should have 'streamer' in its service list. 
*/ func runFileRetrievalTest(nodeCount int) error { - //for every run (live, history), int the variables - initRetrievalTest() - //the ids of the snapshot nodes, initiate only now as we need nodeCount - ids = make([]discover.NodeID, nodeCount) - //channel to check for disconnection errors - disconnectC := make(chan error) - //channel to close disconnection watcher routine - quitC := make(chan struct{}) - //the test conf (using same as in `snapshot_sync_test` - conf = &synctestConfig{} - //map of overlay address to discover ID - conf.addrToIdMap = make(map[string]discover.NodeID) - //array where the generated chunk hashes will be stored - conf.hashes = make([]storage.Address, 0) - //load nodes from the snapshot file - net, err := initNetWithSnapshot(nodeCount) - if err != nil { - return err - } - var rpcSubscriptionsWg sync.WaitGroup - //do cleanup after test is terminated - defer func() { - //shutdown the snapshot network - net.Shutdown() - //after the test, clean up local stores initialized with createLocalStoreForId - localStoreCleanup() - //finally clear all data directories - datadirsCleanup() - }() - //get the nodes of the network - nodes := net.GetNodes() - //iterate over all nodes... - for c := 0; c < len(nodes); c++ { - //create an array of discovery nodeIDS - ids[c] = nodes[c].ID() - a := network.ToOverlayAddr(ids[c].Bytes()) - //append it to the array of all overlay addresses - conf.addrs = append(conf.addrs, a) - conf.addrToIdMap[string(a)] = ids[c] - } + sim := simulation.New(map[string]simulation.ServiceFunc{ + "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) { - //needed for healthy call - ppmap = network.NewPeerPotMap(testMinProxBinSize, conf.addrs) - - //an array for the random files - var randomFiles []string - //channel to signal when the upload has finished - uploadFinished := make(chan struct{}) - //channel to trigger new node checks - trigger := make(chan discover.NodeID) - //simulation action - action := func(ctx context.Context) error { - //first run the health check on all nodes, - //wait until nodes are all healthy - ticker := time.NewTicker(200 * time.Millisecond) - defer ticker.Stop() - for range ticker.C { - healthy := true - for _, id := range ids { - r := registries[id] - //PeerPot for this node - addr := common.Bytes2Hex(r.addr.OAddr) - pp := ppmap[addr] - //call Healthy RPC - h := r.delivery.overlay.Healthy(pp) - //print info - log.Debug(r.delivery.overlay.String()) - log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full)) - if !h.GotNN || !h.Full { - healthy = false - break - } + id := ctx.Config.ID + addr := network.NewAddrFromNodeID(id) + store, datadir, err := createTestLocalStorageForID(id, addr) + if err != nil { + return nil, nil, err } - if healthy { - break + bucket.Store(bucketKeyStore, store) + cleanup = func() { + os.RemoveAll(datadir) + store.Close() } - } + localStore := store.(*storage.LocalStore) + db := storage.NewDBAPI(localStore) + kad := network.NewKademlia(addr.Over(), network.NewKadParams()) + delivery := NewDelivery(kad, db) - if history { - log.Info("Uploading for history") - //If testing only history, we upload the chunk(s) first - conf.hashes, randomFiles, err = uploadFilesToNodes(nodes) - if err != nil { - return err - } - } + r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ + DoSync: true, + SyncUpdateDelay: 3 * time.Second, + }) - //variables needed to wait for all subscriptions established before uploading - errc := 
make(chan error) - - //now setup and start event watching in order to know when we can upload - ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second) - defer watchCancel() - - log.Info("Setting up stream subscription") - //We need two iterations, one to subscribe to the subscription events - //(so we know when setup phase is finished), and one to - //actually run the stream subscriptions. We can't do it in the same iteration, - //because while the first nodes in the loop are setting up subscriptions, - //the latter ones have not subscribed to listen to peer events yet, - //and then we miss events. - - //first iteration: setup disconnection watcher and subscribe to peer events - for j, id := range ids { - log.Trace(fmt.Sprintf("Subscribe to subscription events: %d", j)) - client, err := net.GetNode(id).Client() - if err != nil { - return err - } - wsDoneC := watchSubscriptionEvents(ctx, id, client, errc, quitC) - // doneC is nil, the error happened which is sent to errc channel, already - if wsDoneC == nil { - continue - } - rpcSubscriptionsWg.Add(1) - go func() { - <-wsDoneC - rpcSubscriptionsWg.Done() - }() - - //watch for peers disconnecting - wdDoneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC) - if err != nil { - return err - } - rpcSubscriptionsWg.Add(1) - go func() { - <-wdDoneC - rpcSubscriptionsWg.Done() - }() - } + fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams()) + bucket.Store(bucketKeyFileStore, fileStore) - //second iteration: start syncing and setup stream subscriptions - for j, id := range ids { - log.Trace(fmt.Sprintf("Start syncing and stream subscriptions: %d", j)) - client, err := net.GetNode(id).Client() - if err != nil { - return err - } - //start syncing! - var cnt int - err = client.CallContext(ctx, &cnt, "stream_startSyncing") - if err != nil { - return err - } - //increment the number of subscriptions we need to wait for - //by the count returned from startSyncing (SYNC subscriptions) - subscriptionCount += cnt - //now also add the number of RETRIEVAL_REQUEST subscriptions - for snid := range registries[id].peers { - subscriptionCount++ - err = client.CallContext(ctx, nil, "stream_subscribeStream", snid, NewStream(swarmChunkServerStreamName, "", false), nil, Top) - if err != nil { - return err - } - } - } + return r, cleanup, nil - //now wait until the number of expected subscriptions has been finished - //`watchSubscriptionEvents` will write with a `nil` value to errc - //every time a `SubscriptionMsg` has been received - for err := range errc { - if err != nil { - return err - } - //`nil` received, decrement count - subscriptionCount-- - //all subscriptions received - if subscriptionCount == 0 { - break - } - } + }, + }) + defer sim.Close() - log.Info("Stream subscriptions successfully requested, action terminated") + log.Info("Initializing test config") - if live { - //upload generated files to nodes - var hashes []storage.Address - var rfiles []string - hashes, rfiles, err = uploadFilesToNodes(nodes) - if err != nil { - return err - } - conf.hashes = append(conf.hashes, hashes...) - randomFiles = append(randomFiles, rfiles...) 
- //signal to the trigger loop that the upload has finished - uploadFinished <- struct{}{} - } + conf := &synctestConfig{} + //map of discover ID to indexes of chunks expected at that ID + conf.idToChunksMap = make(map[discover.NodeID][]int) + //map of overlay address to discover ID + conf.addrToIDMap = make(map[string]discover.NodeID) + //array where the generated chunk hashes will be stored + conf.hashes = make([]storage.Address, 0) - return nil + err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount)) + if err != nil { + return err } - //check defines what will be checked during the test - check := func(ctx context.Context, id discover.NodeID) (bool, error) { - - select { - case <-ctx.Done(): - return false, ctx.Err() - case e := <-disconnectC: - log.Error(e.Error()) - return false, fmt.Errorf("Disconnect event detected, network unhealthy") - default: - } - log.Trace(fmt.Sprintf("Checking node: %s", id)) - //if there are more than one chunk, test only succeeds if all expected chunks are found - allSuccess := true - - //check on the node's FileStore (netstore) - fileStore := registries[id].fileStore - //check all chunks - for i, hash := range conf.hashes { - reader, _ := fileStore.Retrieve(hash) - //check that we can read the file size and that it corresponds to the generated file size - if s, err := reader.Size(nil); err != nil || s != int64(len(randomFiles[i])) { - allSuccess = false - log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id) - } else { - log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash)) - } + ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancelSimRun() + + result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error { + nodeIDs := sim.UpNodeIDs() + for _, n := range nodeIDs { + //get the kademlia overlay address from this ID + a := network.ToOverlayAddr(n.Bytes()) + //append it to the array of all overlay addresses + conf.addrs = append(conf.addrs, a) + //the proximity calculation is on overlay addr, + //the p2p/simulations check func triggers on discover.NodeID, + //so we need to know which overlay addr maps to which nodeID + conf.addrToIDMap[string(a)] = n } - return allSuccess, nil - } + //an array for the random files + var randomFiles []string + //channel to signal when the upload has finished + //uploadFinished := make(chan struct{}) + //channel to trigger new node checks - //for each tick, run the checks on all nodes - timingTicker := time.NewTicker(5 * time.Second) - defer timingTicker.Stop() - go func() { - //for live upload, we should wait for uploads to have finished - //before starting to trigger the checks, due to file size - if live { - <-uploadFinished + conf.hashes, randomFiles, err = uploadFilesToNodes(sim) + if err != nil { + return err } - for range timingTicker.C { - for i := 0; i < len(ids); i++ { - log.Trace(fmt.Sprintf("triggering step %d, id %s", i, ids[i])) - trigger <- ids[i] - } + if _, err := sim.WaitTillHealthy(ctx, 2); err != nil { + return err } - }() - - log.Info("Starting simulation run...") - timeout := MaxTimeout * time.Second - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - //run the simulation - result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{ - Action: action, - Trigger: trigger, - Expect: &simulations.Expectation{ - Nodes: ids, - Check: check, - }, + // File retrieval check is repeated until all uploaded files are retrieved from all nodes + // or until 
the timeout is reached. + allSuccess := false + for !allSuccess { + for _, id := range nodeIDs { + //for each expected chunk, check if it is in the local store + localChunks := conf.idToChunksMap[id] + localSuccess := true + for _, ch := range localChunks { + //get the real chunk by the index in the index array + chunk := conf.hashes[ch] + log.Trace(fmt.Sprintf("node has chunk: %s:", chunk)) + //check if the expected chunk is indeed in the localstore + var err error + //check on the node's FileStore (netstore) + item, ok := sim.NodeItem(id, bucketKeyFileStore) + if !ok { + return fmt.Errorf("No registry") + } + fileStore := item.(*storage.FileStore) + //check all chunks + for i, hash := range conf.hashes { + reader, _ := fileStore.Retrieve(context.TODO(), hash) + //check that we can read the file size and that it corresponds to the generated file size + if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) { + allSuccess = false + log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id) + } else { + log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash)) + } + } + if err != nil { + log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id)) + localSuccess = false + } else { + log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id)) + } + } + allSuccess = localSuccess + } + } + if !allSuccess { + return fmt.Errorf("Not all chunks succeeded!") + } + return nil }) if result.Error != nil { @@ -466,14 +249,6 @@ func runFileRetrievalTest(nodeCount int) error { /* The test generates the given number of chunks. -The upload is done by dependency to the global -`live` and `history` variables; - -If `live` is set, first stream subscriptions are established, then -upload to a random node. - -If `history` is enabled, first upload then build up subscriptions. - The test loads a snapshot file to construct the swarm network, assuming that the snapshot file identifies a healthy kademlia network. Nevertheless a health check runs in the @@ -482,259 +257,129 @@ simulation's `action` function. The snapshot should have 'streamer' in its service list. */ func runRetrievalTest(chunkCount int, nodeCount int) error { - //for every run (live, history), int the variables - initRetrievalTest() - //the ids of the snapshot nodes, initiate only now as we need nodeCount - ids = make([]discover.NodeID, nodeCount) - //channel to check for disconnection errors - disconnectC := make(chan error) - //channel to close disconnection watcher routine - quitC := make(chan struct{}) - //the test conf (using same as in `snapshot_sync_test` - conf = &synctestConfig{} - //map of overlay address to discover ID - conf.addrToIdMap = make(map[string]discover.NodeID) - //array where the generated chunk hashes will be stored - conf.hashes = make([]storage.Address, 0) - //load nodes from the snapshot file - net, err := initNetWithSnapshot(nodeCount) - if err != nil { - return err - } - var rpcSubscriptionsWg sync.WaitGroup - //do cleanup after test is terminated - defer func() { - //shutdown the snapshot network - net.Shutdown() - //after the test, clean up local stores initialized with createLocalStoreForId - localStoreCleanup() - //finally clear all data directories - datadirsCleanup() - }() - //get the nodes of the network - nodes := net.GetNodes() - //select one index at random... - idx := rand.Intn(len(nodes)) - //...and get the the node at that index - //this is the node selected for upload - uploadNode := nodes[idx] - //iterate over all nodes... 
- for c := 0; c < len(nodes); c++ { - //create an array of discovery nodeIDS - ids[c] = nodes[c].ID() - a := network.ToOverlayAddr(ids[c].Bytes()) - //append it to the array of all overlay addresses - conf.addrs = append(conf.addrs, a) - conf.addrToIdMap[string(a)] = ids[c] - } + sim := simulation.New(map[string]simulation.ServiceFunc{ + "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) { - //needed for healthy call - ppmap = network.NewPeerPotMap(testMinProxBinSize, conf.addrs) - - trigger := make(chan discover.NodeID) - //simulation action - action := func(ctx context.Context) error { - //first run the health check on all nodes, - //wait until nodes are all healthy - ticker := time.NewTicker(200 * time.Millisecond) - defer ticker.Stop() - for range ticker.C { - healthy := true - for _, id := range ids { - r := registries[id] - //PeerPot for this node - addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes())) - pp := ppmap[addr] - //call Healthy RPC - h := r.delivery.overlay.Healthy(pp) - //print info - log.Debug(r.delivery.overlay.String()) - log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full)) - if !h.GotNN || !h.Full { - healthy = false - break - } - } - if healthy { - break - } - } - - if history { - log.Info("Uploading for history") - //If testing only history, we upload the chunk(s) first - conf.hashes, err = uploadFileToSingleNodeStore(uploadNode.ID(), chunkCount) + id := ctx.Config.ID + addr := network.NewAddrFromNodeID(id) + store, datadir, err := createTestLocalStorageForID(id, addr) if err != nil { - return err + return nil, nil, err } - } - - //variables needed to wait for all subscriptions established before uploading - errc := make(chan error) - - //now setup and start event watching in order to know when we can upload - ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second) - defer watchCancel() - - log.Info("Setting up stream subscription") - //We need two iterations, one to subscribe to the subscription events - //(so we know when setup phase is finished), and one to - //actually run the stream subscriptions. We can't do it in the same iteration, - //because while the first nodes in the loop are setting up subscriptions, - //the latter ones have not subscribed to listen to peer events yet, - //and then we miss events. 
- - //first iteration: setup disconnection watcher and subscribe to peer events - for j, id := range ids { - log.Trace(fmt.Sprintf("Subscribe to subscription events: %d", j)) - client, err := net.GetNode(id).Client() - if err != nil { - return err + bucket.Store(bucketKeyStore, store) + cleanup = func() { + os.RemoveAll(datadir) + store.Close() } + localStore := store.(*storage.LocalStore) + db := storage.NewDBAPI(localStore) + kad := network.NewKademlia(addr.Over(), network.NewKadParams()) + delivery := NewDelivery(kad, db) - //check for `SubscribeMsg` events to know when setup phase is complete - wsDoneC := watchSubscriptionEvents(ctx, id, client, errc, quitC) - // doneC is nil, the error happened which is sent to errc channel, already - if wsDoneC == nil { - continue - } - rpcSubscriptionsWg.Add(1) - go func() { - <-wsDoneC - rpcSubscriptionsWg.Done() - }() - - //watch for peers disconnecting - wdDoneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC) - if err != nil { - return err - } - rpcSubscriptionsWg.Add(1) - go func() { - <-wdDoneC - rpcSubscriptionsWg.Done() - }() - } + r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ + DoSync: true, + SyncUpdateDelay: 0, + }) - //second iteration: start syncing and setup stream subscriptions - for j, id := range ids { - log.Trace(fmt.Sprintf("Start syncing and stream subscriptions: %d", j)) - client, err := net.GetNode(id).Client() - if err != nil { - return err - } - //start syncing! - var cnt int - err = client.CallContext(ctx, &cnt, "stream_startSyncing") - if err != nil { - return err - } - //increment the number of subscriptions we need to wait for - //by the count returned from startSyncing (SYNC subscriptions) - subscriptionCount += cnt - //now also add the number of RETRIEVAL_REQUEST subscriptions - for snid := range registries[id].peers { - subscriptionCount++ - err = client.CallContext(ctx, nil, "stream_subscribeStream", snid, NewStream(swarmChunkServerStreamName, "", false), nil, Top) - if err != nil { - return err - } - } - } + fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams()) + bucketKeyFileStore = simulation.BucketKey("filestore") + bucket.Store(bucketKeyFileStore, fileStore) - //now wait until the number of expected subscriptions has been finished - //`watchSubscriptionEvents` will write with a `nil` value to errc - //every time a `SubscriptionMsg` has been received - for err := range errc { - if err != nil { - return err - } - //`nil` received, decrement count - subscriptionCount-- - //all subscriptions received - if subscriptionCount == 0 { - break - } - } + return r, cleanup, nil - log.Info("Stream subscriptions successfully requested, action terminated") + }, + }) + defer sim.Close() - if live { - //now upload the chunks to the selected random single node - chnks, err := uploadFileToSingleNodeStore(uploadNode.ID(), chunkCount) - if err != nil { - return err - } - conf.hashes = append(conf.hashes, chnks...) 
- } + conf := &synctestConfig{} + //map of discover ID to indexes of chunks expected at that ID + conf.idToChunksMap = make(map[discover.NodeID][]int) + //map of overlay address to discover ID + conf.addrToIDMap = make(map[string]discover.NodeID) + //array where the generated chunk hashes will be stored + conf.hashes = make([]storage.Address, 0) - return nil + err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount)) + if err != nil { + return err } - chunkSize := storage.DefaultChunkSize - - //check defines what will be checked during the test - check := func(ctx context.Context, id discover.NodeID) (bool, error) { - - //don't check the uploader node - if id == uploadNode.ID() { - return true, nil + ctx := context.Background() + result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error { + nodeIDs := sim.UpNodeIDs() + for _, n := range nodeIDs { + //get the kademlia overlay address from this ID + a := network.ToOverlayAddr(n.Bytes()) + //append it to the array of all overlay addresses + conf.addrs = append(conf.addrs, a) + //the proximity calculation is on overlay addr, + //the p2p/simulations check func triggers on discover.NodeID, + //so we need to know which overlay addr maps to which nodeID + conf.addrToIDMap[string(a)] = n } - select { - case <-ctx.Done(): - return false, ctx.Err() - case e := <-disconnectC: - log.Error(e.Error()) - return false, fmt.Errorf("Disconnect event detected, network unhealthy") - default: + //an array for the random files + var randomFiles []string + //this is the node selected for upload + node := sim.RandomUpNode() + item, ok := sim.NodeItem(node.ID, bucketKeyStore) + if !ok { + return fmt.Errorf("No localstore") } - log.Trace(fmt.Sprintf("Checking node: %s", id)) - //if there are more than one chunk, test only succeeds if all expected chunks are found - allSuccess := true - - //check on the node's FileStore (netstore) - fileStore := registries[id].fileStore - //check all chunks - for _, chnk := range conf.hashes { - reader, _ := fileStore.Retrieve(chnk) - //assuming that reading the Size of the chunk is enough to know we found it - if s, err := reader.Size(nil); err != nil || s != chunkSize { - allSuccess = false - log.Warn("Retrieve error", "err", err, "chunk", chnk, "nodeId", id) - } else { - log.Debug(fmt.Sprintf("Chunk %x found", chnk)) - } + lstore := item.(*storage.LocalStore) + conf.hashes, err = uploadFileToSingleNodeStore(node.ID, chunkCount, lstore) + if err != nil { + return err + } + if _, err := sim.WaitTillHealthy(ctx, 2); err != nil { + return err } - return allSuccess, nil - } - //for each tick, run the checks on all nodes - timingTicker := time.NewTicker(5 * time.Second) - defer timingTicker.Stop() - go func() { - for range timingTicker.C { - for i := 0; i < len(ids); i++ { - log.Trace(fmt.Sprintf("triggering step %d, id %s", i, ids[i])) - trigger <- ids[i] + // File retrieval check is repeated until all uploaded files are retrieved from all nodes + // or until the timeout is reached. 
+ allSuccess := false + for !allSuccess { + for _, id := range nodeIDs { + //for each expected chunk, check if it is in the local store + localChunks := conf.idToChunksMap[id] + localSuccess := true + for _, ch := range localChunks { + //get the real chunk by the index in the index array + chunk := conf.hashes[ch] + log.Trace(fmt.Sprintf("node has chunk: %s:", chunk)) + //check if the expected chunk is indeed in the localstore + var err error + //check on the node's FileStore (netstore) + item, ok := sim.NodeItem(id, bucketKeyFileStore) + if !ok { + return fmt.Errorf("No registry") + } + fileStore := item.(*storage.FileStore) + //check all chunks + for i, hash := range conf.hashes { + reader, _ := fileStore.Retrieve(context.TODO(), hash) + //check that we can read the file size and that it corresponds to the generated file size + if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) { + allSuccess = false + log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id) + } else { + log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash)) + } + } + if err != nil { + log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id)) + localSuccess = false + } else { + log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id)) + } + } + allSuccess = localSuccess } } - }() - - log.Info("Starting simulation run...") - - timeout := MaxTimeout * time.Second - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - //run the simulation - result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{ - Action: action, - Trigger: trigger, - Expect: &simulations.Expectation{ - Nodes: ids, - Check: check, - }, + if !allSuccess { + return fmt.Errorf("Not all chunks succeeded!") + } + return nil }) if result.Error != nil { @@ -743,49 +388,3 @@ func runRetrievalTest(chunkCount int, nodeCount int) error { return nil } - -//upload generated files to nodes -//every node gets one file uploaded -func uploadFilesToNodes(nodes []*simulations.Node) ([]storage.Address, []string, error) { - nodeCnt := len(nodes) - log.Debug(fmt.Sprintf("Uploading %d files to nodes", nodeCnt)) - //array holding generated files - rfiles := make([]string, nodeCnt) - //array holding the root hashes of the files - rootAddrs := make([]storage.Address, nodeCnt) - - var err error - //for every node, generate a file and upload - for i, n := range nodes { - id := n.ID() - fileStore := registries[id].fileStore - //generate a file - rfiles[i], err = generateRandomFile() - if err != nil { - return nil, nil, err - } - //store it (upload it) on the FileStore - rk, wait, err := fileStore.Store(strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false) - log.Debug("Uploaded random string file to node") - wait() - if err != nil { - return nil, nil, err - } - rootAddrs[i] = rk - } - return rootAddrs, rfiles, nil -} - -//generate a random file (string) -func generateRandomFile() (string, error) { - //generate a random file size between minFileSize and maxFileSize - fileSize := rand.Intn(maxFileSize-minFileSize) + minFileSize - log.Debug(fmt.Sprintf("Generated file with filesize %d kB", fileSize)) - b := make([]byte, fileSize*1024) - _, err := crand.Read(b) - if err != nil { - log.Error("Error generating random file.", "err", err) - return "", err - } - return string(b), nil -} diff --git a/swarm/network/stream/snapshot_sync_test.go b/swarm/network/stream/snapshot_sync_test.go index ff1c39319d17..6acab50af47c 100644 --- 
a/swarm/network/stream/snapshot_sync_test.go +++ b/swarm/network/stream/snapshot_sync_test.go @@ -18,12 +18,8 @@ package stream import ( "context" crand "crypto/rand" - "encoding/json" - "flag" "fmt" "io" - "io/ioutil" - "math/rand" "os" "sync" "testing" @@ -31,82 +27,27 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/simulations" "github.com/ethereum/go-ethereum/p2p/simulations/adapters" - "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/swarm/network" - streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" + "github.com/ethereum/go-ethereum/swarm/network/simulation" "github.com/ethereum/go-ethereum/swarm/pot" + "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" + mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db" ) const testMinProxBinSize = 2 const MaxTimeout = 600 -var ( - pof = pot.DefaultPof(256) - - conf *synctestConfig - ids []discover.NodeID - datadirs map[discover.NodeID]string - ppmap map[string]*network.PeerPot - - live bool - history bool - - longrunning = flag.Bool("longrunning", false, "do run long-running tests") -) - type synctestConfig struct { addrs [][]byte hashes []storage.Address idToChunksMap map[discover.NodeID][]int chunksToNodesMap map[string][]int - addrToIdMap map[string]discover.NodeID -} - -func init() { - rand.Seed(time.Now().Unix()) -} - -//common_test needs to initialize the test in a init() func -//in order for adapters to register the NewStreamerService; -//this service is dependent on some global variables -//we thus need to initialize first as init() as well. -func initSyncTest() { - //assign the toAddr func so NewStreamerService can build the addr - toAddr = func(id discover.NodeID) *network.BzzAddr { - addr := network.NewAddrFromNodeID(id) - return addr - } - //global func to create local store - if *useMockStore { - createStoreFunc = createMockStore - } else { - createStoreFunc = createTestLocalStorageForId - } - //local stores - stores = make(map[discover.NodeID]storage.ChunkStore) - //data directories for each node and store - datadirs = make(map[discover.NodeID]string) - //deliveries for each node - deliveries = make(map[discover.NodeID]*Delivery) - //registries, map of discover.NodeID to its streamer - registries = make(map[discover.NodeID]*TestRegistry) - //not needed for this test but required from common_test for NewStreamService - waitPeerErrC = make(chan error) - //also not needed for this test but required for NewStreamService - peerCount = func(id discover.NodeID) int { - if ids[0] == id || ids[len(ids)-1] == id { - return 1 - } - return 2 - } - if *useMockStore { - createGlobalStore() - } + addrToIDMap map[string]discover.NodeID } //This test is a syncing test for nodes. @@ -116,12 +57,12 @@ func initSyncTest() { //to the pivot node, and we check that nodes get the chunks //they are expected to store based on the syncing protocol. //Number of chunks and nodes can be provided via commandline too. 
-func TestSyncing(t *testing.T) { +func TestSyncingViaGlobalSync(t *testing.T) { //if nodes/chunks have been provided via commandline, //run the tests with these values if *nodes != 0 && *chunks != 0 { log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes)) - testSyncing(t, *chunks, *nodes) + testSyncingViaGlobalSync(t, *chunks, *nodes) } else { var nodeCnt []int var chnkCnt []int @@ -138,230 +79,281 @@ func TestSyncing(t *testing.T) { for _, chnk := range chnkCnt { for _, n := range nodeCnt { log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n)) - testSyncing(t, chnk, n) + testSyncingViaGlobalSync(t, chnk, n) } } } } -//Do run the tests -//Every test runs 3 times, a live, a history, and a live AND history -func testSyncing(t *testing.T, chunkCount int, nodeCount int) { - //test live and NO history - log.Info("Testing live and no history") - live = true - history = false - err := runSyncTest(chunkCount, nodeCount, live, history) - if err != nil { - t.Fatal(err) - } - //test history only - log.Info("Testing history only") - live = false - history = true - err = runSyncTest(chunkCount, nodeCount, live, history) - if err != nil { - t.Fatal(err) - } - //finally test live and history - log.Info("Testing live and history") - live = true - err = runSyncTest(chunkCount, nodeCount, live, history) - if err != nil { - t.Fatal(err) +func TestSyncingViaDirectSubscribe(t *testing.T) { + //if nodes/chunks have been provided via commandline, + //run the tests with these values + if *nodes != 0 && *chunks != 0 { + log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes)) + err := testSyncingViaDirectSubscribe(*chunks, *nodes) + if err != nil { + t.Fatal(err) + } + } else { + var nodeCnt []int + var chnkCnt []int + //if the `longrunning` flag has been provided + //run more test combinations + if *longrunning { + chnkCnt = []int{1, 8, 32, 256, 1024} + nodeCnt = []int{32, 16} + } else { + //default test + chnkCnt = []int{4, 32} + nodeCnt = []int{32, 16} + } + for _, chnk := range chnkCnt { + for _, n := range nodeCnt { + log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n)) + err := testSyncingViaDirectSubscribe(chnk, n) + if err != nil { + t.Fatal(err) + } + } + } } } -/* -The test generates the given number of chunks +func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) { + sim := simulation.New(map[string]simulation.ServiceFunc{ + "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) { -The upload is done by dependency to the global -`live` and `history` variables; + id := ctx.Config.ID + addr := network.NewAddrFromNodeID(id) + store, datadir, err := createTestLocalStorageForID(id, addr) + if err != nil { + return nil, nil, err + } + bucket.Store(bucketKeyStore, store) + cleanup = func() { + os.RemoveAll(datadir) + store.Close() + } + localStore := store.(*storage.LocalStore) + db := storage.NewDBAPI(localStore) + kad := network.NewKademlia(addr.Over(), network.NewKadParams()) + delivery := NewDelivery(kad, db) -If `live` is set, first stream subscriptions are established, then -upload to a random node. + r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ + DoSync: true, + SyncUpdateDelay: 3 * time.Second, + }) + bucket.Store(bucketKeyRegistry, r) -If `history` is enabled, first upload then build up subscriptions. 
+ return r, cleanup, nil -For every chunk generated, the nearest node addresses -are identified, we verify that the nodes closer to the -chunk addresses actually do have the chunks in their local stores. + }, + }) + defer sim.Close() -The test loads a snapshot file to construct the swarm network, -assuming that the snapshot file identifies a healthy -kademlia network. The snapshot should have 'streamer' in its service list. + log.Info("Initializing test config") -For every test run, a series of three tests will be executed: -- a LIVE test first, where first subscriptions are established, - then a file (random chunks) is uploaded -- a HISTORY test, where the file is uploaded first, and then - the subscriptions are established -- a crude LIVE AND HISTORY test last, where (different) chunks - are uploaded twice, once before and once after subscriptions -*/ -func runSyncTest(chunkCount int, nodeCount int, live bool, history bool) error { - initSyncTest() - //the ids of the snapshot nodes, initiate only now as we need nodeCount - ids = make([]discover.NodeID, nodeCount) - //initialize the test struct - conf = &synctestConfig{} + conf := &synctestConfig{} //map of discover ID to indexes of chunks expected at that ID conf.idToChunksMap = make(map[discover.NodeID][]int) //map of overlay address to discover ID - conf.addrToIdMap = make(map[string]discover.NodeID) + conf.addrToIDMap = make(map[string]discover.NodeID) //array where the generated chunk hashes will be stored conf.hashes = make([]storage.Address, 0) - //channel to trigger node checks in the simulation - trigger := make(chan discover.NodeID) - //channel to check for disconnection errors - disconnectC := make(chan error) - //channel to close disconnection watcher routine - quitC := make(chan struct{}) - - //load nodes from the snapshot file - net, err := initNetWithSnapshot(nodeCount) + + err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount)) if err != nil { - return err + t.Fatal(err) } - var rpcSubscriptionsWg sync.WaitGroup - //do cleanup after test is terminated - defer func() { - // close quitC channel to signall all goroutines to clanup - // before calling simulation network shutdown. - close(quitC) - //wait for all rpc subscriptions to unsubscribe - rpcSubscriptionsWg.Wait() - //shutdown the snapshot network - net.Shutdown() - //after the test, clean up local stores initialized with createLocalStoreForId - localStoreCleanup() - //finally clear all data directories - datadirsCleanup() - }() - //get the nodes of the network - nodes := net.GetNodes() - //select one index at random... - idx := rand.Intn(len(nodes)) - //...and get the the node at that index - //this is the node selected for upload - node := nodes[idx] - log.Info("Initializing test config") - //iterate over all nodes... 
- for c := 0; c < len(nodes); c++ { - //create an array of discovery node IDs - ids[c] = nodes[c].ID() - //get the kademlia overlay address from this ID - a := network.ToOverlayAddr(ids[c].Bytes()) - //append it to the array of all overlay addresses - conf.addrs = append(conf.addrs, a) - //the proximity calculation is on overlay addr, - //the p2p/simulations check func triggers on discover.NodeID, - //so we need to know which overlay addr maps to which nodeID - conf.addrToIdMap[string(a)] = ids[c] - } - log.Info("Test config successfully initialized") - - //only needed for healthy call when debugging - ppmap = network.NewPeerPotMap(testMinProxBinSize, conf.addrs) - - //define the action to be performed before the test checks: start syncing - action := func(ctx context.Context) error { - //first run the health check on all nodes, - //wait until nodes are all healthy - ticker := time.NewTicker(200 * time.Millisecond) - defer ticker.Stop() - for range ticker.C { - healthy := true - for _, id := range ids { - r := registries[id] - //PeerPot for this node - addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes())) - pp := ppmap[addr] - //call Healthy RPC - h := r.delivery.overlay.Healthy(pp) - //print info - log.Debug(r.delivery.overlay.String()) - log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full)) - if !h.GotNN || !h.Full { - healthy = false - break - } - } - if healthy { - break - } + ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancelSimRun() + + result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error { + nodeIDs := sim.UpNodeIDs() + for _, n := range nodeIDs { + //get the kademlia overlay address from this ID + a := network.ToOverlayAddr(n.Bytes()) + //append it to the array of all overlay addresses + conf.addrs = append(conf.addrs, a) + //the proximity calculation is on overlay addr, + //the p2p/simulations check func triggers on discover.NodeID, + //so we need to know which overlay addr maps to which nodeID + conf.addrToIDMap[string(a)] = n + } + + //get the the node at that index + //this is the node selected for upload + node := sim.RandomUpNode() + item, ok := sim.NodeItem(node.ID, bucketKeyStore) + if !ok { + return fmt.Errorf("No localstore") + } + lstore := item.(*storage.LocalStore) + hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore) + if err != nil { + return err + } + conf.hashes = append(conf.hashes, hashes...) + mapKeysToNodes(conf) + + if _, err := sim.WaitTillHealthy(ctx, 2); err != nil { + return err } - if history { - log.Info("Uploading for history") - //If testing only history, we upload the chunk(s) first - chunks, err := uploadFileToSingleNodeStore(node.ID(), chunkCount) + // File retrieval check is repeated until all uploaded files are retrieved from all nodes + // or until the timeout is reached. + allSuccess := false + var gDir string + var globalStore *mockdb.GlobalStore + if *useMockStore { + gDir, globalStore, err = createGlobalStore() if err != nil { - return err + return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil") } - conf.hashes = append(conf.hashes, chunks...) - //finally map chunks to the closest addresses - mapKeysToNodes(conf) + defer func() { + os.RemoveAll(gDir) + err := globalStore.Close() + if err != nil { + log.Error("Error closing global store! 
%v", "err", err) + } + }() } + for !allSuccess { + for _, id := range nodeIDs { + //for each expected chunk, check if it is in the local store + localChunks := conf.idToChunksMap[id] + localSuccess := true + for _, ch := range localChunks { + //get the real chunk by the index in the index array + chunk := conf.hashes[ch] + log.Trace(fmt.Sprintf("node has chunk: %s:", chunk)) + //check if the expected chunk is indeed in the localstore + var err error + if *useMockStore { + //use the globalStore if the mockStore should be used; in that case, + //the complete localStore stack is bypassed for getting the chunk + _, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk) + } else { + //use the actual localstore + item, ok := sim.NodeItem(id, bucketKeyStore) + if !ok { + return fmt.Errorf("Error accessing localstore") + } + lstore := item.(*storage.LocalStore) + _, err = lstore.Get(ctx, chunk) + } + if err != nil { + log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id)) + localSuccess = false + // Do not get crazy with logging the warn message + time.Sleep(500 * time.Millisecond) + } else { + log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id)) + } + } + allSuccess = localSuccess + } + } + if !allSuccess { + return fmt.Errorf("Not all chunks succeeded!") + } + return nil + }) - //variables needed to wait for all subscriptions established before uploading - errc := make(chan error) + if result.Error != nil { + t.Fatal(result.Error) + } +} - //now setup and start event watching in order to know when we can upload - ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second) - defer watchCancel() +/* +The test generates the given number of chunks - log.Info("Setting up stream subscription") +For every chunk generated, the nearest node addresses +are identified, we verify that the nodes closer to the +chunk addresses actually do have the chunks in their local stores. - //We need two iterations, one to subscribe to the subscription events - //(so we know when setup phase is finished), and one to - //actually run the stream subscriptions. We can't do it in the same iteration, - //because while the first nodes in the loop are setting up subscriptions, - //the latter ones have not subscribed to listen to peer events yet, - //and then we miss events. +The test loads a snapshot file to construct the swarm network, +assuming that the snapshot file identifies a healthy +kademlia network. The snapshot should have 'streamer' in its service list. 
+*/ +func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error { + sim := simulation.New(map[string]simulation.ServiceFunc{ + "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) { - //first iteration: setup disconnection watcher and subscribe to peer events - for j, id := range ids { - log.Trace(fmt.Sprintf("Subscribe to subscription events: %d", j)) - client, err := net.GetNode(id).Client() + id := ctx.Config.ID + addr := network.NewAddrFromNodeID(id) + store, datadir, err := createTestLocalStorageForID(id, addr) if err != nil { - return err + return nil, nil, err } - - wsDoneC := watchSubscriptionEvents(ctx, id, client, errc, quitC) - // doneC is nil, the error happened which is sent to errc channel, already - if wsDoneC == nil { - continue + bucket.Store(bucketKeyStore, store) + cleanup = func() { + os.RemoveAll(datadir) + store.Close() } - rpcSubscriptionsWg.Add(1) - go func() { - <-wsDoneC - rpcSubscriptionsWg.Done() - }() + localStore := store.(*storage.LocalStore) + db := storage.NewDBAPI(localStore) + kad := network.NewKademlia(addr.Over(), network.NewKadParams()) + delivery := NewDelivery(kad, db) - //watch for peers disconnecting - wdDoneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC) - if err != nil { - return err - } - rpcSubscriptionsWg.Add(1) - go func() { - <-wdDoneC - rpcSubscriptionsWg.Done() - }() + r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), nil) + bucket.Store(bucketKeyRegistry, r) + + fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams()) + bucket.Store(bucketKeyFileStore, fileStore) + + return r, cleanup, nil + + }, + }) + defer sim.Close() + + ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancelSimRun() + + conf := &synctestConfig{} + //map of discover ID to indexes of chunks expected at that ID + conf.idToChunksMap = make(map[discover.NodeID][]int) + //map of overlay address to discover ID + conf.addrToIDMap = make(map[string]discover.NodeID) + //array where the generated chunk hashes will be stored + conf.hashes = make([]storage.Address, 0) + + err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount)) + if err != nil { + return err + } + + result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error { + nodeIDs := sim.UpNodeIDs() + for _, n := range nodeIDs { + //get the kademlia overlay address from this ID + a := network.ToOverlayAddr(n.Bytes()) + //append it to the array of all overlay addresses + conf.addrs = append(conf.addrs, a) + //the proximity calculation is on overlay addr, + //the p2p/simulations check func triggers on discover.NodeID, + //so we need to know which overlay addr maps to which nodeID + conf.addrToIDMap[string(a)] = n } - //second iteration: start syncing - for j, id := range ids { + var subscriptionCount int + + filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4) + eventC := sim.PeerEvents(ctx, nodeIDs, filter) + + for j, node := range nodeIDs { log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j)) - client, err := net.GetNode(id).Client() - if err != nil { - return err - } //start syncing! 
+ item, ok := sim.NodeItem(node, bucketKeyRegistry) + if !ok { + return fmt.Errorf("No registry") + } + registry := item.(*Registry) + var cnt int - err = client.CallContext(ctx, &cnt, "stream_startSyncing") + cnt, err = startSyncing(registry, conf) if err != nil { return err } @@ -370,117 +362,91 @@ func runSyncTest(chunkCount int, nodeCount int, live bool, history bool) error { subscriptionCount += cnt } - //now wait until the number of expected subscriptions has been finished - //`watchSubscriptionEvents` will write with a `nil` value to errc - for err := range errc { - if err != nil { - return err + for e := range eventC { + if e.Error != nil { + return e.Error } - //`nil` received, decrement count subscriptionCount-- - //all subscriptions received if subscriptionCount == 0 { break } } - - log.Info("Stream subscriptions successfully requested") - if live { - //now upload the chunks to the selected random single node - hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount) - if err != nil { - return err - } - conf.hashes = append(conf.hashes, hashes...) - //finally map chunks to the closest addresses - log.Debug(fmt.Sprintf("Uploaded chunks for live syncing: %v", conf.hashes)) - mapKeysToNodes(conf) - log.Info(fmt.Sprintf("Uploaded %d chunks to random single node", chunkCount)) + //select a random node for upload + node := sim.RandomUpNode() + item, ok := sim.NodeItem(node.ID, bucketKeyStore) + if !ok { + return fmt.Errorf("No localstore") } + lstore := item.(*storage.LocalStore) + hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore) + if err != nil { + return err + } + conf.hashes = append(conf.hashes, hashes...) + mapKeysToNodes(conf) - log.Info("Action terminated") - - return nil - } - - //check defines what will be checked during the test - check := func(ctx context.Context, id discover.NodeID) (bool, error) { - select { - case <-ctx.Done(): - return false, ctx.Err() - case e := <-disconnectC: - log.Error(e.Error()) - return false, fmt.Errorf("Disconnect event detected, network unhealthy") - default: + if _, err := sim.WaitTillHealthy(ctx, 2); err != nil { + return err } - log.Trace(fmt.Sprintf("Checking node: %s", id)) - //select the local store for the given node - //if there are more than one chunk, test only succeeds if all expected chunks are found - allSuccess := true - - //all the chunk indexes which are supposed to be found for this node - localChunks := conf.idToChunksMap[id] - //for each expected chunk, check if it is in the local store - for _, ch := range localChunks { - //get the real chunk by the index in the index array - chunk := conf.hashes[ch] - log.Trace(fmt.Sprintf("node has chunk: %s:", chunk)) - //check if the expected chunk is indeed in the localstore - var err error - if *useMockStore { - if globalStore == nil { - return false, fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil") - } - //use the globalStore if the mockStore should be used; in that case, - //the complete localStore stack is bypassed for getting the chunk - _, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk) - } else { - //use the actual localstore - lstore := stores[id] - _, err = lstore.Get(chunk) - } + + var gDir string + var globalStore *mockdb.GlobalStore + if *useMockStore { + gDir, globalStore, err = createGlobalStore() if err != nil { - log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id)) - allSuccess = false - } else { - log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id)) + return 
fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil") } + defer os.RemoveAll(gDir) } - - return allSuccess, nil - } - - //for each tick, run the checks on all nodes - timingTicker := time.NewTicker(time.Second * 1) - defer timingTicker.Stop() - go func() { - for range timingTicker.C { - for i := 0; i < len(ids); i++ { - log.Trace(fmt.Sprintf("triggering step %d, id %s", i, ids[i])) - trigger <- ids[i] + // File retrieval check is repeated until all uploaded files are retrieved from all nodes + // or until the timeout is reached. + allSuccess := false + for !allSuccess { + for _, id := range nodeIDs { + //for each expected chunk, check if it is in the local store + localChunks := conf.idToChunksMap[id] + localSuccess := true + for _, ch := range localChunks { + //get the real chunk by the index in the index array + chunk := conf.hashes[ch] + log.Trace(fmt.Sprintf("node has chunk: %s:", chunk)) + //check if the expected chunk is indeed in the localstore + var err error + if *useMockStore { + //use the globalStore if the mockStore should be used; in that case, + //the complete localStore stack is bypassed for getting the chunk + _, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk) + } else { + //use the actual localstore + item, ok := sim.NodeItem(id, bucketKeyStore) + if !ok { + return fmt.Errorf("Error accessing localstore") + } + lstore := item.(*storage.LocalStore) + _, err = lstore.Get(ctx, chunk) + } + if err != nil { + log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id)) + localSuccess = false + // Do not get crazy with logging the warn message + time.Sleep(500 * time.Millisecond) + } else { + log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id)) + } + } + allSuccess = localSuccess } } - }() - - log.Info("Starting simulation run...") - - timeout := MaxTimeout * time.Second - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - //run the simulation - result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{ - Action: action, - Trigger: trigger, - Expect: &simulations.Expectation{ - Nodes: ids, - Check: check, - }, + if !allSuccess { + return fmt.Errorf("Not all chunks succeeded!") + } + return nil }) if result.Error != nil { return result.Error } + log.Info("Simulation terminated") return nil } @@ -489,20 +455,9 @@ func runSyncTest(chunkCount int, nodeCount int, live bool, history bool) error { //issues `RequestSubscriptionMsg` to peers, based on po, by iterating over //the kademlia's `EachBin` function. 
//returns the number of subscriptions requested -func (r *TestRegistry) StartSyncing(ctx context.Context) (int, error) { +func startSyncing(r *Registry, conf *synctestConfig) (int, error) { var err error - if log.Lvl(*loglevel) == log.LvlDebug { - //PeerPot for this node - addr := common.Bytes2Hex(r.addr.OAddr) - pp := ppmap[addr] - //call Healthy RPC - h := r.delivery.overlay.Healthy(pp) - //print info - log.Debug(r.delivery.overlay.String()) - log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full)) - } - kad, ok := r.delivery.overlay.(*network.Kademlia) if !ok { return 0, fmt.Errorf("Not a Kademlia!") @@ -512,14 +467,10 @@ func (r *TestRegistry) StartSyncing(ctx context.Context) (int, error) { //iterate over each bin and solicit needed subscription to bins kad.EachBin(r.addr.Over(), pof, 0, func(conn network.OverlayConn, po int) bool { //identify begin and start index of the bin(s) we want to subscribe to - log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr.ID(), conf.addrToIdMap[string(conn.Address())], po)) - var histRange *Range - if history { - histRange = &Range{} - } + histRange := &Range{} subCnt++ - err = r.RequestSubscription(conf.addrToIdMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), live), histRange, Top) + err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), histRange, Top) if err != nil { log.Error(fmt.Sprintf("Error in RequestSubsciption! %v", err)) return false @@ -552,7 +503,7 @@ func mapKeysToNodes(conf *synctestConfig) { return false } if pl == 256 || pl == po { - log.Trace(fmt.Sprintf("appending %s", conf.addrToIdMap[string(a)])) + log.Trace(fmt.Sprintf("appending %s", conf.addrToIDMap[string(a)])) nns = append(nns, indexmap[string(a)]) nodemap[string(a)] = append(nodemap[string(a)], i) } @@ -567,153 +518,29 @@ func mapKeysToNodes(conf *synctestConfig) { } for addr, chunks := range nodemap { //this selects which chunks are expected to be found with the given node - conf.idToChunksMap[conf.addrToIdMap[addr]] = chunks + conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks } log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap)) conf.chunksToNodesMap = kmap } //upload a file(chunks) to a single local node store -func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int) ([]storage.Address, error) { +func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) { log.Debug(fmt.Sprintf("Uploading to node id: %s", id)) - lstore := stores[id] - size := chunkSize fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams()) + size := chunkSize var rootAddrs []storage.Address for i := 0; i < chunkCount; i++ { - rk, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) - wait() + rk, wait, err := fileStore.Store(context.TODO(), io.LimitReader(crand.Reader, int64(size)), int64(size), false) if err != nil { return nil, err } - rootAddrs = append(rootAddrs, (rk)) - } - - return rootAddrs, nil -} - -//initialize a network from a snapshot -func initNetWithSnapshot(nodeCount int) (*simulations.Network, error) { - - var a adapters.NodeAdapter - //add the streamer service to the node adapter - - if *adapter == "exec" { - dirname, err := ioutil.TempDir(".", "") + err = wait(context.TODO()) if err != nil { return nil, err } - a = adapters.NewExecAdapter(dirname) - } else if *adapter == 
"tcp" { - a = adapters.NewTCPAdapter(services) - } else if *adapter == "sim" { - a = adapters.NewSimAdapter(services) - } - - log.Info("Setting up Snapshot network") - - net := simulations.NewNetwork(a, &simulations.NetworkConfig{ - ID: "0", - DefaultService: "streamer", - }) - - f, err := os.Open(fmt.Sprintf("testing/snapshot_%d.json", nodeCount)) - if err != nil { - return nil, err - } - defer f.Close() - jsonbyte, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - var snap simulations.Snapshot - err = json.Unmarshal(jsonbyte, &snap) - if err != nil { - return nil, err - } - - //the snapshot probably has the property EnableMsgEvents not set - //just in case, set it to true! - //(we need this to wait for messages before uploading) - for _, n := range snap.Nodes { - n.Node.Config.EnableMsgEvents = true - } - - log.Info("Waiting for p2p connections to be established...") - - //now we can load the snapshot - err = net.Load(&snap) - if err != nil { - return nil, err - } - log.Info("Snapshot loaded") - return net, nil -} - -//we want to wait for subscriptions to be established before uploading to test -//that live syncing is working correctly -func watchSubscriptionEvents(ctx context.Context, id discover.NodeID, client *rpc.Client, errc chan error, quitC chan struct{}) (doneC <-chan struct{}) { - events := make(chan *p2p.PeerEvent) - sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents") - if err != nil { - log.Error(err.Error()) - errc <- fmt.Errorf("error getting peer events for node %v: %s", id, err) - return + rootAddrs = append(rootAddrs, (rk)) } - c := make(chan struct{}) - - go func() { - defer func() { - log.Trace("watch subscription events: unsubscribe", "id", id) - sub.Unsubscribe() - close(c) - }() - - for { - select { - case <-quitC: - return - case <-ctx.Done(): - select { - case errc <- ctx.Err(): - case <-quitC: - } - return - case e := <-events: - //just catch SubscribeMsg - if e.Type == p2p.PeerEventTypeMsgRecv && e.Protocol == "stream" && e.MsgCode != nil && *e.MsgCode == 4 { - errc <- nil - } - case err := <-sub.Err(): - if err != nil { - select { - case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err): - case <-quitC: - } - return - } - } - } - }() - return c -} -//create a local store for the given node -func createTestLocalStorageForId(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) { - var datadir string - var err error - datadir, err = ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString())) - if err != nil { - return nil, err - } - datadirs[id] = datadir - var store storage.ChunkStore - params := storage.NewDefaultLocalStoreParams() - params.ChunkDbPath = datadir - params.BaseKey = addr.Over() - store, err = storage.NewTestLocalStoreForAddr(params) - if err != nil { - return nil, err - } - return store, nil + return rootAddrs, nil } diff --git a/swarm/network/stream/stream.go b/swarm/network/stream/stream.go index 9b4658c51e7a..cd0580a0c06e 100644 --- a/swarm/network/stream/stream.go +++ b/swarm/network/stream/stream.go @@ -32,8 +32,10 @@ import ( "github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/network/stream/intervals" "github.com/ethereum/go-ethereum/swarm/pot" + "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" + opentracing "github.com/opentracing/opentracing-go" ) const ( @@ 
-235,7 +237,7 @@ func (r *Registry) RequestSubscription(peerId discover.NodeID, s Stream, h *Rang if e, ok := err.(*notFoundError); ok && e.t == "server" { // request subscription only if the server for this stream is not created log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h) - return peer.Send(&RequestSubscriptionMsg{ + return peer.Send(context.TODO(), &RequestSubscriptionMsg{ Stream: s, History: h, Priority: prio, @@ -285,7 +287,7 @@ func (r *Registry) Subscribe(peerId discover.NodeID, s Stream, h *Range, priorit } log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h) - return peer.SendPriority(msg, priority) + return peer.SendPriority(context.TODO(), msg, priority) } func (r *Registry) Unsubscribe(peerId discover.NodeID, s Stream) error { @@ -299,7 +301,7 @@ func (r *Registry) Unsubscribe(peerId discover.NodeID, s Stream) error { } log.Debug("Unsubscribe ", "peer", peerId, "stream", s) - if err := peer.Send(msg); err != nil { + if err := peer.Send(context.TODO(), msg); err != nil { return err } return peer.removeClient(s) @@ -320,11 +322,17 @@ func (r *Registry) Quit(peerId discover.NodeID, s Stream) error { } log.Debug("Quit ", "peer", peerId, "stream", s) - return peer.Send(msg) + return peer.Send(context.TODO(), msg) } -func (r *Registry) Retrieve(chunk *storage.Chunk) error { - return r.delivery.RequestFromPeers(chunk.Addr[:], r.skipCheck) +func (r *Registry) Retrieve(ctx context.Context, chunk *storage.Chunk) error { + var sp opentracing.Span + ctx, sp = spancontext.StartSpan( + ctx, + "registry.retrieve") + defer sp.Finish() + + return r.delivery.RequestFromPeers(ctx, chunk.Addr[:], r.skipCheck) } func (r *Registry) NodeInfo() interface{} { @@ -460,11 +468,11 @@ func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error { } // HandleMsg is the message handler that delegates incoming messages -func (p *Peer) HandleMsg(msg interface{}) error { +func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error { switch msg := msg.(type) { case *SubscribeMsg: - return p.handleSubscribeMsg(msg) + return p.handleSubscribeMsg(ctx, msg) case *SubscribeErrorMsg: return p.handleSubscribeErrorMsg(msg) @@ -473,22 +481,22 @@ func (p *Peer) HandleMsg(msg interface{}) error { return p.handleUnsubscribeMsg(msg) case *OfferedHashesMsg: - return p.handleOfferedHashesMsg(msg) + return p.handleOfferedHashesMsg(ctx, msg) case *TakeoverProofMsg: - return p.handleTakeoverProofMsg(msg) + return p.handleTakeoverProofMsg(ctx, msg) case *WantedHashesMsg: - return p.handleWantedHashesMsg(msg) + return p.handleWantedHashesMsg(ctx, msg) case *ChunkDeliveryMsg: - return p.streamer.delivery.handleChunkDeliveryMsg(p, msg) + return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, msg) case *RetrieveRequestMsg: - return p.streamer.delivery.handleRetrieveRequestMsg(p, msg) + return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg) case *RequestSubscriptionMsg: - return p.handleRequestSubscription(msg) + return p.handleRequestSubscription(ctx, msg) case *QuitMsg: return p.handleQuitMsg(msg) @@ -508,7 +516,7 @@ type server struct { // Server interface for outgoing peer Streamer type Server interface { SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) - GetData([]byte) ([]byte, error) + GetData(context.Context, []byte) ([]byte, error) Close() } @@ -551,7 +559,7 @@ func (c client) NextInterval() (start, end uint64, err error) { // Client interface for incoming peer Streamer type Client interface { - 
NeedData([]byte) func() + NeedData(context.Context, []byte) func() BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) Close() } @@ -588,7 +596,7 @@ func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error if err != nil { return err } - if err := p.SendPriority(tp, c.priority); err != nil { + if err := p.SendPriority(context.TODO(), tp, c.priority); err != nil { return err } if c.to > 0 && tp.Takeover.End >= c.to { @@ -645,7 +653,7 @@ func (c *clientParams) clientCreated() { // Spec is the spec of the streamer protocol var Spec = &protocols.Spec{ Name: "stream", - Version: 4, + Version: 5, MaxMsgSize: 10 * 1024 * 1024, Messages: []interface{}{ UnsubscribeMsg{}, diff --git a/swarm/network/stream/streamer_test.go b/swarm/network/stream/streamer_test.go index 44622c995574..7523860c9c09 100644 --- a/swarm/network/stream/streamer_test.go +++ b/swarm/network/stream/streamer_test.go @@ -18,6 +18,7 @@ package stream import ( "bytes" + "context" "testing" "time" @@ -79,7 +80,7 @@ func newTestClient(t string) *testClient { } } -func (self *testClient) NeedData(hash []byte) func() { +func (self *testClient) NeedData(ctx context.Context, hash []byte) func() { self.receivedHashes[string(hash)] = hash if bytes.Equal(hash, hash0[:]) { return func() { @@ -114,7 +115,7 @@ func (self *testServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, ui return make([]byte, HashSize), from + 1, to + 1, nil, nil } -func (self *testServer) GetData([]byte) ([]byte, error) { +func (self *testServer) GetData(context.Context, []byte) ([]byte, error) { return nil, nil } diff --git a/swarm/network/stream/syncer.go b/swarm/network/stream/syncer.go index 5510b2409e93..d7febe4a3efc 100644 --- a/swarm/network/stream/syncer.go +++ b/swarm/network/stream/syncer.go @@ -17,6 +17,7 @@ package stream import ( + "context" "math" "strconv" "time" @@ -78,8 +79,8 @@ func (s *SwarmSyncerServer) Close() { } // GetSection retrieves the actual chunk from localstore -func (s *SwarmSyncerServer) GetData(key []byte) ([]byte, error) { - chunk, err := s.db.Get(storage.Address(key)) +func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) { + chunk, err := s.db.Get(ctx, storage.Address(key)) if err == storage.ErrFetching { <-chunk.ReqC } else if err != nil { @@ -210,8 +211,8 @@ func RegisterSwarmSyncerClient(streamer *Registry, db *storage.DBAPI) { } // NeedData -func (s *SwarmSyncerClient) NeedData(key []byte) (wait func()) { - chunk, _ := s.db.GetOrCreateRequest(key) +func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func()) { + chunk, _ := s.db.GetOrCreateRequest(ctx, key) // TODO: we may want to request from this peer anyway even if the request exists // ignoreExistingRequest is temporary commented out until its functionality is verified. 
diff --git a/swarm/network/stream/syncer_test.go b/swarm/network/stream/syncer_test.go index 68e20841dfb8..f72aa3444186 100644 --- a/swarm/network/stream/syncer_test.go +++ b/swarm/network/stream/syncer_test.go @@ -23,18 +23,22 @@ import ( "io" "io/ioutil" "math" + "os" "sync" "testing" "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/simulations" - "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/network" - streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" + "github.com/ethereum/go-ethereum/swarm/network/simulation" + "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" + mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db" ) const dataChunkCount = 200 @@ -46,219 +50,193 @@ func TestSyncerSimulation(t *testing.T) { testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1) } -func createMockStore(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) { - var err error +func createMockStore(globalStore *mockdb.GlobalStore, id discover.NodeID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) { address := common.BytesToAddress(id.Bytes()) mockStore := globalStore.NewNodeStore(address) params := storage.NewDefaultLocalStoreParams() - datadirs[id], err = ioutil.TempDir("", "localMockStore-"+id.TerminalString()) + + datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString()) if err != nil { - return nil, err + return nil, "", err } - params.Init(datadirs[id]) + params.Init(datadir) params.BaseKey = addr.Over() - lstore, err := storage.NewLocalStore(params, mockStore) - return lstore, nil + lstore, err = storage.NewLocalStore(params, mockStore) + return lstore, datadir, nil } func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) { - defer setDefaultSkipCheck(defaultSkipCheck) - defaultSkipCheck = skipCheck - //data directories for each node and store - datadirs = make(map[discover.NodeID]string) - if *useMockStore { - createStoreFunc = createMockStore - createGlobalStore() - } else { - createStoreFunc = createTestLocalStorageFromSim - } - defer datadirsCleanup() + sim := simulation.New(map[string]simulation.ServiceFunc{ + "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) { + var store storage.ChunkStore + var globalStore *mockdb.GlobalStore + var gDir, datadir string + + id := ctx.Config.ID + addr := network.NewAddrFromNodeID(id) + //hack to put addresses in same space + addr.OAddr[0] = byte(0) + + if *useMockStore { + gDir, globalStore, err = createGlobalStore() + if err != nil { + return nil, nil, fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil") + } + store, datadir, err = createMockStore(globalStore, id, addr) + } else { + store, datadir, err = createTestLocalStorageForID(id, addr) + } + if err != nil { + return nil, nil, err + } + bucket.Store(bucketKeyStore, store) + cleanup = func() { + store.Close() + os.RemoveAll(datadir) + if *useMockStore { + err := globalStore.Close() + if err != nil { + log.Error("Error closing global 
store! %v", "err", err) + } + os.RemoveAll(gDir) + } + } + localStore := store.(*storage.LocalStore) + db := storage.NewDBAPI(localStore) + bucket.Store(bucketKeyDB, db) + kad := network.NewKademlia(addr.Over(), network.NewKadParams()) + delivery := NewDelivery(kad, db) + bucket.Store(bucketKeyDelivery, delivery) + + r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ + SkipCheck: skipCheck, + }) + + fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams()) + bucket.Store(bucketKeyFileStore, fileStore) + + return r, cleanup, nil + + }, + }) + defer sim.Close() - registries = make(map[discover.NodeID]*TestRegistry) - toAddr = func(id discover.NodeID) *network.BzzAddr { - addr := network.NewAddrFromNodeID(id) - //hack to put addresses in same space - addr.OAddr[0] = byte(0) - return addr - } - conf := &streamTesting.RunConfig{ - Adapter: *adapter, - NodeCount: nodes, - ConnLevel: conns, - ToAddr: toAddr, - Services: services, - EnableMsgEvents: false, - } - // HACK: these are global variables in the test so that they are available for - // the service constructor function - // TODO: will this work with exec/docker adapter? - // localstore of nodes made available for action and check calls - stores = make(map[discover.NodeID]storage.ChunkStore) - deliveries = make(map[discover.NodeID]*Delivery) // create context for simulation run timeout := 30 * time.Second ctx, cancel := context.WithTimeout(context.Background(), timeout) // defer cancel should come before defer simulation teardown defer cancel() - // create simulation network with the config - sim, teardown, err := streamTesting.NewSimulation(conf) - var rpcSubscriptionsWg sync.WaitGroup - defer func() { - rpcSubscriptionsWg.Wait() - teardown() - }() + _, err := sim.AddNodesAndConnectChain(nodes) if err != nil { - t.Fatal(err.Error()) - } - - nodeIndex := make(map[discover.NodeID]int) - for i, id := range sim.IDs { - nodeIndex[id] = i - if !*useMockStore { - stores[id] = sim.Stores[i] - sim.Stores[i] = stores[id] - } - } - // peerCount function gives the number of peer connections for a nodeID - // this is needed for the service run function to wait until - // each protocol instance runs and the streamer peers are available - peerCount = func(id discover.NodeID) int { - if sim.IDs[0] == id || sim.IDs[nodes-1] == id { - return 1 - } - return 2 - } - waitPeerErrC = make(chan error) - - // create DBAPI-s for all nodes - dbs := make([]*storage.DBAPI, nodes) - for i := 0; i < nodes; i++ { - dbs[i] = storage.NewDBAPI(sim.Stores[i].(*storage.LocalStore)) + t.Fatal(err) } + result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error { + nodeIDs := sim.UpNodeIDs() - // collect hashes in po 1 bin for each node - hashes := make([][]storage.Address, nodes) - totalHashes := 0 - hashCounts := make([]int, nodes) - for i := nodes - 1; i >= 0; i-- { - if i < nodes-1 { - hashCounts[i] = hashCounts[i+1] + nodeIndex := make(map[discover.NodeID]int) + for i, id := range nodeIDs { + nodeIndex[id] = i } - dbs[i].Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool { - hashes[i] = append(hashes[i], addr) - totalHashes++ - hashCounts[i]++ - return true - }) - } - // errc is error channel for simulation - errc := make(chan error, 1) - quitC := make(chan struct{}) - defer close(quitC) + disconnections := sim.PeerEvents( + context.Background(), + sim.NodeIDs(), + simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop), + ) + + go func() { + for 
d := range disconnections { + if d.Error != nil { + log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer) + t.Fatal(d.Error) + } + } + }() - // action is subscribe - action := func(ctx context.Context) error { - // need to wait till an aynchronous process registers the peers in streamer.peers - // that is used by Subscribe - // the global peerCount function tells how many connections each node has - // TODO: this is to be reimplemented with peerEvent watcher without global var - i := 0 - for err := range waitPeerErrC { + // each node Subscribes to each other's swarmChunkServerStreamName + for j := 0; j < nodes-1; j++ { + id := nodeIDs[j] + client, err := sim.Net.GetNode(id).Client() if err != nil { - return fmt.Errorf("error waiting for peers: %s", err) + t.Fatal(err) } - i++ - if i == nodes { - break + sid := nodeIDs[j+1] + client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top) + if err != nil { + return err } - } - // each node Subscribes to each other's swarmChunkServerStreamName - for j := 0; j < nodes-1; j++ { - id := sim.IDs[j] - sim.Stores[j] = stores[id] - err := sim.CallClient(id, func(client *rpc.Client) error { - // report disconnect events to the error channel cos peers should not disconnect - doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC) + if j > 0 || nodes == 2 { + item, ok := sim.NodeItem(nodeIDs[j], bucketKeyFileStore) + if !ok { + return fmt.Errorf("No filestore") + } + fileStore := item.(*storage.FileStore) + size := chunkCount * chunkSize + _, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) if err != nil { - return err + t.Fatal(err.Error()) } - rpcSubscriptionsWg.Add(1) - go func() { - <-doneC - rpcSubscriptionsWg.Done() - }() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) - defer cancel() - // start syncing, i.e., subscribe to upstream peers po 1 bin - sid := sim.IDs[j+1] - return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top) - }) - if err != nil { - return err + wait(ctx) } } // here we distribute chunks of a random file into stores 1...nodes - rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams()) - size := chunkCount * chunkSize - _, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) - // need to wait cos we then immediately collect the relevant bin content - wait() - if err != nil { - t.Fatal(err.Error()) + if _, err := sim.WaitTillHealthy(ctx, 2); err != nil { + return err } - return nil - } - - // this makes sure check is not called before the previous call finishes - check := func(ctx context.Context, id discover.NodeID) (bool, error) { - select { - case err := <-errc: - return false, err - case <-ctx.Done(): - return false, ctx.Err() - default: + // collect hashes in po 1 bin for each node + hashes := make([][]storage.Address, nodes) + totalHashes := 0 + hashCounts := make([]int, nodes) + for i := nodes - 1; i >= 0; i-- { + if i < nodes-1 { + hashCounts[i] = hashCounts[i+1] + } + item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB) + if !ok { + return fmt.Errorf("No DB") + } + db := item.(*storage.DBAPI) + db.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool { + hashes[i] = append(hashes[i], addr) + totalHashes++ + hashCounts[i]++ + return true + }) } - - i := nodeIndex[id] var total, found int - - 
for j := i; j < nodes; j++ { - total += len(hashes[j]) - for _, key := range hashes[j] { - chunk, err := dbs[i].Get(key) - if err == storage.ErrFetching { - <-chunk.ReqC - } else if err != nil { - continue + for _, node := range nodeIDs { + i := nodeIndex[node] + + for j := i; j < nodes; j++ { + total += len(hashes[j]) + for _, key := range hashes[j] { + item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB) + if !ok { + return fmt.Errorf("No DB") + } + db := item.(*storage.DBAPI) + chunk, err := db.Get(ctx, key) + if err == storage.ErrFetching { + <-chunk.ReqC + } else if err != nil { + continue + } + // needed for leveldb not to be closed? + // chunk.WaitToStore() + found++ } - // needed for leveldb not to be closed? - // chunk.WaitToStore() - found++ } + log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total) } - log.Debug("sync check", "node", id, "index", i, "bin", po, "found", found, "total", total) - return total == found, nil - } + if total == found && total > 0 { + return nil + } + return fmt.Errorf("Total not equallying found: total is %d", total) + }) - conf.Step = &simulations.Step{ - Action: action, - Trigger: streamTesting.Trigger(500*time.Millisecond, quitC, sim.IDs[0:nodes-1]...), - Expect: &simulations.Expectation{ - Nodes: sim.IDs[0:1], - Check: check, - }, - } - startedAt := time.Now() - result, err := sim.Run(ctx, conf) - finishedAt := time.Now() - if err != nil { - t.Fatalf("Setting up simulation failed: %v", err) - } if result.Error != nil { - t.Fatalf("Simulation failed: %s", result.Error) + t.Fatal(result.Error) } - streamTesting.CheckResult(t, result, startedAt, finishedAt) } diff --git a/swarm/network/stream/testing/testing.go b/swarm/network/stream/testing/testing.go deleted file mode 100644 index d584ec3974c2..000000000000 --- a/swarm/network/stream/testing/testing.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package testing - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "math/rand" - "os" - "sync" - "testing" - "time" - - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/simulations" - "github.com/ethereum/go-ethereum/p2p/simulations/adapters" - "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/network" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -type Simulation struct { - Net *simulations.Network - Stores []storage.ChunkStore - Addrs []network.Addr - IDs []discover.NodeID -} - -func SetStores(addrs ...network.Addr) ([]storage.ChunkStore, func(), error) { - var datadirs []string - stores := make([]storage.ChunkStore, len(addrs)) - var err error - for i, addr := range addrs { - var datadir string - datadir, err = ioutil.TempDir("", "streamer") - if err != nil { - break - } - var store storage.ChunkStore - params := storage.NewDefaultLocalStoreParams() - params.Init(datadir) - params.BaseKey = addr.Over() - store, err = storage.NewTestLocalStoreForAddr(params) - if err != nil { - break - } - datadirs = append(datadirs, datadir) - stores[i] = store - } - teardown := func() { - for i, datadir := range datadirs { - stores[i].Close() - os.RemoveAll(datadir) - } - } - return stores, teardown, err -} - -func NewAdapter(adapterType string, services adapters.Services) (adapter adapters.NodeAdapter, teardown func(), err error) { - teardown = func() {} - switch adapterType { - case "sim": - adapter = adapters.NewSimAdapter(services) - case "exec": - baseDir, err0 := ioutil.TempDir("", "swarm-test") - if err0 != nil { - return nil, teardown, err0 - } - teardown = func() { os.RemoveAll(baseDir) } - adapter = adapters.NewExecAdapter(baseDir) - case "docker": - adapter, err = adapters.NewDockerAdapter() - if err != nil { - return nil, teardown, err - } - default: - return nil, teardown, errors.New("adapter needs to be one of sim, exec, docker") - } - return adapter, teardown, nil -} - -func CheckResult(t *testing.T, result *simulations.StepResult, startedAt, finishedAt time.Time) { - t.Logf("Simulation passed in %s", result.FinishedAt.Sub(result.StartedAt)) - if len(result.Passes) > 1 { - var min, max time.Duration - var sum int - for _, pass := range result.Passes { - duration := pass.Sub(result.StartedAt) - if sum == 0 || duration < min { - min = duration - } - if duration > max { - max = duration - } - sum += int(duration.Nanoseconds()) - } - t.Logf("Min: %s, Max: %s, Average: %s", min, max, time.Duration(sum/len(result.Passes))*time.Nanosecond) - } - t.Logf("Setup: %s, Shutdown: %s", result.StartedAt.Sub(startedAt), finishedAt.Sub(result.FinishedAt)) -} - -type RunConfig struct { - Adapter string - Step *simulations.Step - NodeCount int - ConnLevel int - ToAddr func(discover.NodeID) *network.BzzAddr - Services adapters.Services - DefaultService string - EnableMsgEvents bool -} - -func NewSimulation(conf *RunConfig) (*Simulation, func(), error) { - // create network - nodes := conf.NodeCount - adapter, adapterTeardown, err := NewAdapter(conf.Adapter, conf.Services) - if err != nil { - return nil, adapterTeardown, err - } - defaultService := "streamer" - if conf.DefaultService != "" { - defaultService = conf.DefaultService - } - net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{ - ID: "0", - DefaultService: defaultService, - }) - teardown := func() { - 
adapterTeardown() - net.Shutdown() - } - ids := make([]discover.NodeID, nodes) - addrs := make([]network.Addr, nodes) - // start nodes - for i := 0; i < nodes; i++ { - nodeconf := adapters.RandomNodeConfig() - nodeconf.EnableMsgEvents = conf.EnableMsgEvents - node, err := net.NewNodeWithConfig(nodeconf) - if err != nil { - return nil, teardown, fmt.Errorf("error creating node: %s", err) - } - ids[i] = node.ID() - addrs[i] = conf.ToAddr(ids[i]) - } - // set nodes number of Stores available - stores, storeTeardown, err := SetStores(addrs...) - teardown = func() { - net.Shutdown() - adapterTeardown() - storeTeardown() - } - if err != nil { - return nil, teardown, err - } - s := &Simulation{ - Net: net, - Stores: stores, - IDs: ids, - Addrs: addrs, - } - return s, teardown, nil -} - -func (s *Simulation) Run(ctx context.Context, conf *RunConfig) (*simulations.StepResult, error) { - // bring up nodes, launch the servive - nodes := conf.NodeCount - conns := conf.ConnLevel - for i := 0; i < nodes; i++ { - if err := s.Net.Start(s.IDs[i]); err != nil { - return nil, fmt.Errorf("error starting node %s: %s", s.IDs[i].TerminalString(), err) - } - } - // run a simulation which connects the 10 nodes in a chain - wg := sync.WaitGroup{} - for i := range s.IDs { - // collect the overlay addresses, to - for j := 0; j < conns; j++ { - var k int - if j == 0 { - k = i - 1 - } else { - k = rand.Intn(len(s.IDs)) - } - if i > 0 { - wg.Add(1) - go func(i, k int) { - defer wg.Done() - s.Net.Connect(s.IDs[i], s.IDs[k]) - }(i, k) - } - } - } - wg.Wait() - log.Info(fmt.Sprintf("simulation with %v nodes", len(s.Addrs))) - - // create an only locally retrieving FileStore for the pivot node to test - // if retriee requests have arrived - result := simulations.NewSimulation(s.Net).Run(ctx, conf.Step) - return result, nil -} - -// WatchDisconnections subscribes to admin peerEvents and sends peer event drop -// errors to the errc channel. Channel quitC signals the termination of the event loop. -// Returned doneC will be closed after the rpc subscription is unsubscribed, -// signaling that simulations network is safe to shutdown. 
-func WatchDisconnections(id discover.NodeID, client *rpc.Client, errc chan error, quitC chan struct{}) (doneC <-chan struct{}, err error) { - events := make(chan *p2p.PeerEvent) - sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents") - if err != nil { - return nil, fmt.Errorf("error getting peer events for node %v: %s", id, err) - } - c := make(chan struct{}) - go func() { - defer func() { - log.Trace("watch disconnections: unsubscribe", "id", id) - sub.Unsubscribe() - close(c) - }() - for { - select { - case <-quitC: - return - case e := <-events: - if e.Type == p2p.PeerEventTypeDrop { - select { - case errc <- fmt.Errorf("peerEvent for node %v: %v", id, e): - case <-quitC: - return - } - } - case err := <-sub.Err(): - if err != nil { - select { - case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err): - case <-quitC: - return - } - } - } - } - }() - return c, nil -} - -func Trigger(d time.Duration, quitC chan struct{}, ids ...discover.NodeID) chan discover.NodeID { - trigger := make(chan discover.NodeID) - go func() { - defer close(trigger) - ticker := time.NewTicker(d) - defer ticker.Stop() - // we are only testing the pivot node (net.Nodes[0]) - for range ticker.C { - for _, id := range ids { - select { - case trigger <- id: - case <-quitC: - return - } - } - } - }() - return trigger -} - -func (sim *Simulation) CallClient(id discover.NodeID, f func(*rpc.Client) error) error { - node := sim.Net.GetNode(id) - if node == nil { - return fmt.Errorf("unknown node: %s", id) - } - client, err := node.Client() - if err != nil { - return fmt.Errorf("error getting node client: %s", err) - } - return f(client) -} diff --git a/swarm/network_test.go b/swarm/network_test.go index c291fce3b60a..176c635d8262 100644 --- a/swarm/network_test.go +++ b/swarm/network_test.go @@ -28,15 +28,14 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/simulations" "github.com/ethereum/go-ethereum/p2p/simulations/adapters" "github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/network" + "github.com/ethereum/go-ethereum/swarm/network/simulation" "github.com/ethereum/go-ethereum/swarm/storage" colorable "github.com/mattn/go-colorable" ) @@ -261,78 +260,67 @@ type testSwarmNetworkOptions struct { // - May wait for Kademlia on every node to be healthy. // - Checking if a file is retrievable from all nodes. 
func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwarmNetworkStep) { - dir, err := ioutil.TempDir("", "swarm-network-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - if o == nil { o = new(testSwarmNetworkOptions) } - ctx := context.Background() - if o.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, o.Timeout) - defer cancel() - } - - swarms := make(map[discover.NodeID]*Swarm) - files := make([]file, 0) - - services := map[string]adapters.ServiceFunc{ - "swarm": func(ctx *adapters.ServiceContext) (node.Service, error) { + sim := simulation.New(map[string]simulation.ServiceFunc{ + "swarm": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) { config := api.NewConfig() - dir, err := ioutil.TempDir(dir, "node") + dir, err := ioutil.TempDir("", "swarm-network-test-node") if err != nil { - return nil, err + return nil, nil, err + } + cleanup = func() { + err := os.RemoveAll(dir) + if err != nil { + log.Error("cleaning up swarm temp dir", "err", err) + } } config.Path = dir privkey, err := crypto.GenerateKey() if err != nil { - return nil, err + return nil, cleanup, err } config.Init(privkey) config.DeliverySkipCheck = o.SkipCheck - s, err := NewSwarm(config, nil) + swarm, err := NewSwarm(config, nil) if err != nil { - return nil, err + return nil, cleanup, err } - log.Info("new swarm", "bzzKey", config.BzzKey, "baseAddr", fmt.Sprintf("%x", s.bzz.BaseAddr())) - swarms[ctx.Config.ID] = s - return s, nil + bucket.Store(simulation.BucketKeyKademlia, swarm.bzz.Hive.Overlay.(*network.Kademlia)) + log.Info("new swarm", "bzzKey", config.BzzKey, "baseAddr", fmt.Sprintf("%x", swarm.bzz.BaseAddr())) + return swarm, cleanup, nil }, - } - - a := adapters.NewSimAdapter(services) - net := simulations.NewNetwork(a, &simulations.NetworkConfig{ - ID: "0", - DefaultService: "swarm", }) - defer net.Shutdown() + defer sim.Close() - trigger := make(chan discover.NodeID) + ctx := context.Background() + if o.Timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, o.Timeout) + defer cancel() + } - sim := simulations.NewSimulation(net) + files := make([]file, 0) for i, step := range steps { log.Debug("test sync step", "n", i+1, "nodes", step.nodeCount) - change := step.nodeCount - len(allNodeIDs(net)) + change := step.nodeCount - len(sim.UpNodeIDs()) if change > 0 { - _, err := addNodes(change, net) + _, err := sim.AddNodesAndConnectChain(change) if err != nil { t.Fatal(err) } } else if change < 0 { - err := removeNodes(-change, net) + _, err := sim.StopRandomNodes(-change) if err != nil { t.Fatal(err) } @@ -341,160 +329,48 @@ func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwa continue } - nodeIDs := allNodeIDs(net) - shuffle(len(nodeIDs), func(i, j int) { - nodeIDs[i], nodeIDs[j] = nodeIDs[j], nodeIDs[i] - }) - for _, id := range nodeIDs { - key, data, err := uploadFile(swarms[id]) - if err != nil { - t.Fatal(err) - } - log.Trace("file uploaded", "node", id, "key", key.String()) - files = append(files, file{ - addr: key, - data: data, - nodeID: id, - }) - } - - // Prepare PeerPot map for checking Kademlia health - var ppmap map[string]*network.PeerPot - nIDs := allNodeIDs(net) - addrs := make([][]byte, len(nIDs)) - if *waitKademlia { - for i, id := range nIDs { - addrs[i] = swarms[id].bzz.BaseAddr() - } - ppmap = network.NewPeerPotMap(2, addrs) - } - var checkStatusM sync.Map var nodeStatusM sync.Map var totalFoundCount uint64 
- result := sim.Run(ctx, &simulations.Step{ - Action: func(ctx context.Context) error { - if *waitKademlia { - // Wait for healthy Kademlia on every node before checking files - ticker := time.NewTicker(200 * time.Millisecond) - defer ticker.Stop() - - for range ticker.C { - healthy := true - log.Debug("kademlia health check", "node count", len(nIDs), "addr count", len(addrs)) - for i, id := range nIDs { - swarm := swarms[id] - //PeerPot for this node - addr := common.Bytes2Hex(swarm.bzz.BaseAddr()) - pp := ppmap[addr] - //call Healthy RPC - h := swarm.bzz.Healthy(pp) - //print info - log.Debug(swarm.bzz.String()) - log.Debug("kademlia", "empty bins", pp.EmptyBins, "gotNN", h.GotNN, "knowNN", h.KnowNN, "full", h.Full) - log.Debug("kademlia", "health", h.GotNN && h.KnowNN && h.Full, "addr", fmt.Sprintf("%x", swarm.bzz.BaseAddr()), "id", id, "i", i) - log.Debug("kademlia", "ill condition", !h.GotNN || !h.Full, "addr", fmt.Sprintf("%x", swarm.bzz.BaseAddr()), "id", id, "i", i) - if !h.GotNN || !h.Full { - healthy = false - break - } - } - if healthy { - break - } - } + result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error { + nodeIDs := sim.UpNodeIDs() + shuffle(len(nodeIDs), func(i, j int) { + nodeIDs[i], nodeIDs[j] = nodeIDs[j], nodeIDs[i] + }) + for _, id := range nodeIDs { + key, data, err := uploadFile(sim.Service("swarm", id).(*Swarm)) + if err != nil { + return err } + log.Trace("file uploaded", "node", id, "key", key.String()) + files = append(files, file{ + addr: key, + data: data, + nodeID: id, + }) + } - go func() { - // File retrieval check is repeated until all uploaded files are retrieved from all nodes - // or until the timeout is reached. - for { - if retrieve(net, files, swarms, trigger, &checkStatusM, &nodeStatusM, &totalFoundCount) == 0 { - return - } - } - }() - return nil - }, - Trigger: trigger, - Expect: &simulations.Expectation{ - Nodes: allNodeIDs(net), - Check: func(ctx context.Context, id discover.NodeID) (bool, error) { - // The check is done by a goroutine in the action function. - return true, nil - }, - }, - }) - if result.Error != nil { - t.Fatal(result.Error) - } - log.Debug("done: test sync step", "n", i+1, "nodes", step.nodeCount) - } -} - -// allNodeIDs is returning NodeID for every node that is Up. -func allNodeIDs(net *simulations.Network) (nodes []discover.NodeID) { - for _, n := range net.GetNodes() { - if n.Up { - nodes = append(nodes, n.ID()) - } - } - return -} - -// addNodes adds a number of nodes to the network. 
-func addNodes(count int, net *simulations.Network) (ids []discover.NodeID, err error) { - for i := 0; i < count; i++ { - nodeIDs := allNodeIDs(net) - l := len(nodeIDs) - nodeconf := adapters.RandomNodeConfig() - node, err := net.NewNodeWithConfig(nodeconf) - if err != nil { - return nil, fmt.Errorf("create node: %v", err) - } - err = net.Start(node.ID()) - if err != nil { - return nil, fmt.Errorf("start node: %v", err) - } - - log.Debug("created node", "id", node.ID()) - - // connect nodes in a chain - if l > 0 { - var otherNodeID discover.NodeID - for i := l - 1; i >= 0; i-- { - n := net.GetNode(nodeIDs[i]) - if n.Up { - otherNodeID = n.ID() - break + if *waitKademlia { + if _, err := sim.WaitTillHealthy(ctx, 2); err != nil { + return err } } - log.Debug("connect nodes", "one", node.ID(), "other", otherNodeID) - if err := net.Connect(node.ID(), otherNodeID); err != nil { - return nil, err + + // File retrieval check is repeated until all uploaded files are retrieved from all nodes + // or until the timeout is reached. + for { + if retrieve(sim, files, &checkStatusM, &nodeStatusM, &totalFoundCount) == 0 { + return nil + } } - } - ids = append(ids, node.ID()) - } - return ids, nil -} + }) -// removeNodes stops a random nodes in the network. -func removeNodes(count int, net *simulations.Network) error { - for i := 0; i < count; i++ { - // allNodeIDs are returning only the Up nodes. - nodeIDs := allNodeIDs(net) - if len(nodeIDs) == 0 { - break - } - node := net.GetNode(nodeIDs[rand.Intn(len(nodeIDs))]) - if err := node.Stop(); err != nil { - return err + if result.Error != nil { + t.Fatal(result.Error) } - log.Debug("removed node", "id", node.ID()) + log.Debug("done: test sync step", "n", i+1, "nodes", step.nodeCount) } - return nil } // uploadFile, uploads a short file to the swarm instance @@ -508,23 +384,22 @@ func uploadFile(swarm *Swarm) (storage.Address, string, error) { // File data is very short, but it is ensured that its // uniqueness is very certain. data := fmt.Sprintf("test content %s %x", time.Now().Round(0), b) - k, wait, err := swarm.api.Put(data, "text/plain", false) + ctx := context.TODO() + k, wait, err := swarm.api.Put(ctx, data, "text/plain", false) if err != nil { return nil, "", err } if wait != nil { - wait() + err = wait(ctx) } - return k, data, nil + return k, data, err } // retrieve is the function that is used for checking the availability of // uploaded files in testSwarmNetwork test helper function. 
func retrieve( - net *simulations.Network, + sim *simulation.Simulation, files []file, - swarms map[discover.NodeID]*Swarm, - trigger chan discover.NodeID, checkStatusM *sync.Map, nodeStatusM *sync.Map, totalFoundCount *uint64, @@ -536,7 +411,7 @@ func retrieve( var totalWg sync.WaitGroup errc := make(chan error) - nodeIDs := allNodeIDs(net) + nodeIDs := sim.UpNodeIDs() totalCheckCount := len(nodeIDs) * len(files) @@ -552,8 +427,8 @@ func retrieve( var wg sync.WaitGroup + swarm := sim.Service("swarm", id).(*Swarm) for _, f := range files { - swarm := swarms[id] checkKey := check{ key: f.addr.String(), @@ -570,7 +445,7 @@ func retrieve( log.Debug("api get: check file", "node", id.String(), "key", f.addr.String(), "total files found", atomic.LoadUint64(totalFoundCount)) - r, _, _, _, err := swarm.api.Get(f.addr, "/") + r, _, _, _, err := swarm.api.Get(context.TODO(), api.NOOPDecrypt, f.addr, "/") if err != nil { errc <- fmt.Errorf("api get: node %s, key %s, kademlia %s: %v", id, f.addr, swarm.bzz.Hive, err) return @@ -600,7 +475,6 @@ func retrieve( if foundCount == checkCount { log.Info("all files are found for node", "id", id.String(), "duration", time.Since(start)) nodeStatusM.Store(id, 0) - trigger <- id return } log.Debug("files missing for node", "id", id.String(), "check", checkCount, "found", foundCount) diff --git a/swarm/pss/handshake.go b/swarm/pss/handshake.go index 3b44847ecc1c..e3ead77d0492 100644 --- a/swarm/pss/handshake.go +++ b/swarm/pss/handshake.go @@ -385,7 +385,7 @@ func (ctl *HandshakeController) sendKey(pubkeyid string, topic *Topic, keycount // generate new keys to send for i := 0; i < len(recvkeyids); i++ { var err error - recvkeyids[i], err = ctl.pss.generateSymmetricKey(*topic, to, true) + recvkeyids[i], err = ctl.pss.GenerateSymmetricKey(*topic, to, true) if err != nil { return []string{}, fmt.Errorf("set receive symkey fail (pubkey %x topic %x): %v", pubkeyid, topic, err) } diff --git a/swarm/pss/notify/notify.go b/swarm/pss/notify/notify.go new file mode 100644 index 000000000000..723092c32d2b --- /dev/null +++ b/swarm/pss/notify/notify.go @@ -0,0 +1,394 @@ +package notify + +import ( + "crypto/ecdsa" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/pss" +) + +const ( + // sent from requester to updater to request start of notifications + MsgCodeStart = iota + + // sent from updater to requester, contains a notification plus a new symkey to replace the old + MsgCodeNotifyWithKey + + // sent from updater to requester, contains a notification + MsgCodeNotify + + // sent from requester to updater to request stop of notifications (currently unused) + MsgCodeStop + MsgCodeMax +) + +const ( + DefaultAddressLength = 1 + symKeyLength = 32 // this should be gotten from source +) + +var ( + // control topic is used before symmetric key issuance completes + controlTopic = pss.Topic{0x00, 0x00, 0x00, 0x01} +) + +// when code is MsgCodeStart, Payload is address +// when code is MsgCodeNotifyWithKey, Payload is notification | symkey +// when code is MsgCodeNotify, Payload is notification +// when code is MsgCodeStop, Payload is address +type Msg struct { + Code byte + Name []byte + Payload []byte + namestring string +} + +// NewMsg creates a new notification message object +func NewMsg(code byte, name 
string, payload []byte) *Msg { + return &Msg{ + Code: code, + Name: []byte(name), + Payload: payload, + namestring: name, + } +} + +// NewMsgFromPayload decodes a serialized message payload into a new notification message object +func NewMsgFromPayload(payload []byte) (*Msg, error) { + msg := &Msg{} + err := rlp.DecodeBytes(payload, msg) + if err != nil { + return nil, err + } + msg.namestring = string(msg.Name) + return msg, nil +} + +// a notifier has one sendBin entry for each address space it sends messages to +type sendBin struct { + address pss.PssAddress + symKeyId string + count int +} + +// represents a single notification service +// only subscription address bins that match the address of a notification client have entries. +type notifier struct { + bins map[string]*sendBin + topic pss.Topic // identifies the resource for pss receiver + threshold int // amount of address bytes used in bins + updateC <-chan []byte + quitC chan struct{} +} + +func (n *notifier) removeSubscription() { + n.quitC <- struct{}{} +} + +// represents an individual subscription made by a public key at a specific address/neighborhood +type subscription struct { + pubkeyId string + address pss.PssAddress + handler func(string, []byte) error +} + +// Controller is the interface to control, add and remove notification services and subscriptions +type Controller struct { + pss *pss.Pss + notifiers map[string]*notifier + subscriptions map[string]*subscription + mu sync.Mutex +} + +// NewController creates a new Controller object +func NewController(ps *pss.Pss) *Controller { + ctrl := &Controller{ + pss: ps, + notifiers: make(map[string]*notifier), + subscriptions: make(map[string]*subscription), + } + ctrl.pss.Register(&controlTopic, ctrl.Handler) + return ctrl +} + +// IsActive is used to check if a notification service exists for a specified id string +// Returns true if exists, false if not +func (c *Controller) IsActive(name string) bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.isActive(name) +} + +func (c *Controller) isActive(name string) bool { + _, ok := c.notifiers[name] + return ok +} + +// Subscribe is used by a client to request notifications from a notification service provider +// It will create a MsgCodeStart message and send asymmetrically to the provider using its public key and routing address +// The handler function is a callback that will be called when notifications are received +// Fails if the request pss cannot be sent or if the update message could not be serialized +func (c *Controller) Subscribe(name string, pubkey *ecdsa.PublicKey, address pss.PssAddress, handler func(string, []byte) error) error { + c.mu.Lock() + defer c.mu.Unlock() + msg := NewMsg(MsgCodeStart, name, c.pss.BaseAddr()) + c.pss.SetPeerPublicKey(pubkey, controlTopic, &address) + pubkeyId := hexutil.Encode(crypto.FromECDSAPub(pubkey)) + smsg, err := rlp.EncodeToBytes(msg) + if err != nil { + return err + } + err = c.pss.SendAsym(pubkeyId, controlTopic, smsg) + if err != nil { + return err + } + c.subscriptions[name] = &subscription{ + pubkeyId: pubkeyId, + address: address, + handler: handler, + } + return nil +} + +// Unsubscribe, perhaps unsurprisingly, undoes the effects of Subscribe +// Fails if the subscription does not exist, if the request pss cannot be sent or if the update message could not be serialized +func (c *Controller) Unsubscribe(name string) error { + c.mu.Lock() + defer c.mu.Unlock() + sub, ok := c.subscriptions[name] + if !ok { + return fmt.Errorf("Unknown subscription '%s'", name) + } + 
msg := NewMsg(MsgCodeStop, name, sub.address) + smsg, err := rlp.EncodeToBytes(msg) + if err != nil { + return err + } + err = c.pss.SendAsym(sub.pubkeyId, controlTopic, smsg) + if err != nil { + return err + } + delete(c.subscriptions, name) + return nil +} + +// NewNotifier is used by a notification service provider to create a new notification service +// It takes a name as identifier for the resource, a threshold indicating the granularity of the subscription address bin +// It then starts an event loop which listens to the supplied update channel and executes notifications on channel receives +// Fails if a notifier already is registered on the name +//func (c *Controller) NewNotifier(name string, threshold int, contentFunc func(string) ([]byte, error)) error { +func (c *Controller) NewNotifier(name string, threshold int, updateC <-chan []byte) (func(), error) { + c.mu.Lock() + if c.isActive(name) { + c.mu.Unlock() + return nil, fmt.Errorf("Notification service %s already exists in controller", name) + } + quitC := make(chan struct{}) + c.notifiers[name] = ¬ifier{ + bins: make(map[string]*sendBin), + topic: pss.BytesToTopic([]byte(name)), + threshold: threshold, + updateC: updateC, + quitC: quitC, + //contentFunc: contentFunc, + } + c.mu.Unlock() + go func() { + for { + select { + case <-quitC: + return + case data := <-updateC: + c.notify(name, data) + } + } + }() + + return c.notifiers[name].removeSubscription, nil +} + +// RemoveNotifier is used to stop a notification service. +// It cancels the event loop listening to the notification provider's update channel +func (c *Controller) RemoveNotifier(name string) error { + c.mu.Lock() + defer c.mu.Unlock() + currentNotifier, ok := c.notifiers[name] + if !ok { + return fmt.Errorf("Unknown notification service %s", name) + } + currentNotifier.removeSubscription() + delete(c.notifiers, name) + return nil +} + +// Notify is called by a notification service provider to issue a new notification +// It takes the name of the notification service and the data to be sent. 
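The provider side is symmetric: a hedged sketch of driving NewNotifier (defined above) from an update channel. `serveUpdates` and the ticker-driven payload are illustrative only; the threshold of 2 and the resource name "foo.eth" match the values used in the test file added below.

package example

import (
	"time"

	"github.com/ethereum/go-ethereum/swarm/pss"
	"github.com/ethereum/go-ethereum/swarm/pss/notify"
)

// serveUpdates runs a notification service for one resource, pushing a
// payload on every tick until stop is closed. The controller's event loop
// drains the channel and fans each payload out to every subscriber bin
// using symmetric encryption on the resource topic.
func serveUpdates(ps *pss.Pss, stop <-chan struct{}) error {
	ctrl := notify.NewController(ps)
	updates := make(chan []byte)
	// threshold 2: subscribers are binned on the first two bytes of their address
	cancel, err := ctrl.NewNotifier("foo.eth", 2, updates)
	if err != nil {
		return err
	}
	defer cancel() // stops the controller's event loop for this notifier

	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case t := <-ticker.C:
			updates <- []byte(t.String())
		case <-stop:
			return nil
		}
	}
}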
+// It fails if a notifier with this name does not exist or if data could not be serialized +// Note that it does NOT fail on failure to send a message +func (c *Controller) notify(name string, data []byte) error { + c.mu.Lock() + defer c.mu.Unlock() + if !c.isActive(name) { + return fmt.Errorf("Notification service %s doesn't exist", name) + } + msg := NewMsg(MsgCodeNotify, name, data) + smsg, err := rlp.EncodeToBytes(msg) + if err != nil { + return err + } + for _, m := range c.notifiers[name].bins { + log.Debug("sending pss notify", "name", name, "addr", fmt.Sprintf("%x", m.address), "topic", fmt.Sprintf("%x", c.notifiers[name].topic), "data", data) + go func(m *sendBin) { + err = c.pss.SendSym(m.symKeyId, c.notifiers[name].topic, smsg) + if err != nil { + log.Warn("Failed to send notify to addr %x: %v", m.address, err) + } + }(m) + } + return nil +} + +// check if we already have the bin +// if we do, retrieve the symkey from it and increment the count +// if we dont make a new symkey and a new bin entry +func (c *Controller) addToBin(ntfr *notifier, address []byte) (symKeyId string, pssAddress pss.PssAddress, err error) { + + // parse the address from the message and truncate if longer than our bins threshold + if len(address) > ntfr.threshold { + address = address[:ntfr.threshold] + } + + pssAddress = pss.PssAddress(address) + hexAddress := fmt.Sprintf("%x", address) + currentBin, ok := ntfr.bins[hexAddress] + if ok { + currentBin.count++ + symKeyId = currentBin.symKeyId + } else { + symKeyId, err = c.pss.GenerateSymmetricKey(ntfr.topic, &pssAddress, false) + if err != nil { + return "", nil, err + } + ntfr.bins[hexAddress] = &sendBin{ + address: address, + symKeyId: symKeyId, + count: 1, + } + } + return symKeyId, pssAddress, nil +} + +func (c *Controller) handleStartMsg(msg *Msg, keyid string) (err error) { + + keyidbytes, err := hexutil.Decode(keyid) + if err != nil { + return err + } + pubkey, err := crypto.UnmarshalPubkey(keyidbytes) + if err != nil { + return err + } + + // if name is not registered for notifications we will not react + currentNotifier, ok := c.notifiers[msg.namestring] + if !ok { + return fmt.Errorf("Subscribe attempted on unknown resource '%s'", msg.namestring) + } + + // add to or open new bin + symKeyId, pssAddress, err := c.addToBin(currentNotifier, msg.Payload) + if err != nil { + return err + } + + // add to address book for send initial notify + symkey, err := c.pss.GetSymmetricKey(symKeyId) + if err != nil { + return err + } + err = c.pss.SetPeerPublicKey(pubkey, controlTopic, &pssAddress) + if err != nil { + return err + } + + // TODO this is set to zero-length byte pending decision on protocol for initial message, whether it should include message or not, and how to trigger the initial message so that current state of MRU is sent upon subscription + notify := []byte{} + replyMsg := NewMsg(MsgCodeNotifyWithKey, msg.namestring, make([]byte, len(notify)+symKeyLength)) + copy(replyMsg.Payload, notify) + copy(replyMsg.Payload[len(notify):], symkey) + sReplyMsg, err := rlp.EncodeToBytes(replyMsg) + if err != nil { + return err + } + return c.pss.SendAsym(keyid, controlTopic, sReplyMsg) +} + +func (c *Controller) handleNotifyWithKeyMsg(msg *Msg) error { + symkey := msg.Payload[len(msg.Payload)-symKeyLength:] + topic := pss.BytesToTopic(msg.Name) + + // \TODO keep track of and add actual address + updaterAddr := pss.PssAddress([]byte{}) + c.pss.SetSymmetricKey(symkey, topic, &updaterAddr, true) + c.pss.Register(&topic, c.Handler) + return 
c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload[:len(msg.Payload)-symKeyLength]) +} + +func (c *Controller) handleStopMsg(msg *Msg) error { + // if name is not registered for notifications we will not react + currentNotifier, ok := c.notifiers[msg.namestring] + if !ok { + return fmt.Errorf("Unsubscribe attempted on unknown resource '%s'", msg.namestring) + } + + // parse the address from the message and truncate if longer than our bins' address length threshold + address := msg.Payload + if len(msg.Payload) > currentNotifier.threshold { + address = address[:currentNotifier.threshold] + } + + // remove the entry from the bin if it exists, and remove the bin if it's the last remaining one + hexAddress := fmt.Sprintf("%x", address) + currentBin, ok := currentNotifier.bins[hexAddress] + if !ok { + return fmt.Errorf("found no active bin for address %s", hexAddress) + } + currentBin.count-- + if currentBin.count == 0 { // if no more clients in this bin, remove it + delete(currentNotifier.bins, hexAddress) + } + return nil +} + +// Handler is the pss topic handler to be used to process notification service messages +// It should be registered in the pss of both to any notification service provides and clients using the service +func (c *Controller) Handler(smsg []byte, p *p2p.Peer, asymmetric bool, keyid string) error { + c.mu.Lock() + defer c.mu.Unlock() + log.Debug("notify controller handler", "keyid", keyid) + + // see if the message is valid + msg, err := NewMsgFromPayload(smsg) + if err != nil { + return err + } + + switch msg.Code { + case MsgCodeStart: + return c.handleStartMsg(msg, keyid) + case MsgCodeNotifyWithKey: + return c.handleNotifyWithKeyMsg(msg) + case MsgCodeNotify: + return c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload) + case MsgCodeStop: + return c.handleStopMsg(msg) + } + + return fmt.Errorf("Invalid message code: %d", msg.Code) +} diff --git a/swarm/pss/notify/notify_test.go b/swarm/pss/notify/notify_test.go new file mode 100644 index 000000000000..3c655f215ccb --- /dev/null +++ b/swarm/pss/notify/notify_test.go @@ -0,0 +1,252 @@ +package notify + +import ( + "bytes" + "context" + "flag" + "fmt" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" + "github.com/ethereum/go-ethereum/swarm/network" + "github.com/ethereum/go-ethereum/swarm/pss" + "github.com/ethereum/go-ethereum/swarm/state" + whisper "github.com/ethereum/go-ethereum/whisper/whisperv5" +) + +var ( + loglevel = flag.Int("l", 3, "loglevel") + psses map[string]*pss.Pss + w *whisper.Whisper + wapi *whisper.PublicWhisperAPI +) + +func init() { + flag.Parse() + hs := log.StreamHandler(os.Stderr, log.TerminalFormat(true)) + hf := log.LvlFilterHandler(log.Lvl(*loglevel), hs) + h := log.CallerFileHandler(hf) + log.Root().SetHandler(h) + + w = whisper.New(&whisper.DefaultConfig) + wapi = whisper.NewPublicWhisperAPI(w) + psses = make(map[string]*pss.Pss) +} + +// Creates a client node and notifier node +// Client sends pss notifications requests +// notifier sends initial notification with symmetric key, and +// second notification symmetrically encrypted +func TestStart(t *testing.T) 
{ + adapter := adapters.NewSimAdapter(newServices(false)) + net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{ + ID: "0", + DefaultService: "bzz", + }) + leftNodeConf := adapters.RandomNodeConfig() + leftNodeConf.Services = []string{"bzz", "pss"} + leftNode, err := net.NewNodeWithConfig(leftNodeConf) + if err != nil { + t.Fatal(err) + } + err = net.Start(leftNode.ID()) + if err != nil { + t.Fatal(err) + } + + rightNodeConf := adapters.RandomNodeConfig() + rightNodeConf.Services = []string{"bzz", "pss"} + rightNode, err := net.NewNodeWithConfig(rightNodeConf) + if err != nil { + t.Fatal(err) + } + err = net.Start(rightNode.ID()) + if err != nil { + t.Fatal(err) + } + + err = net.Connect(rightNode.ID(), leftNode.ID()) + if err != nil { + t.Fatal(err) + } + + leftRpc, err := leftNode.Client() + if err != nil { + t.Fatal(err) + } + + rightRpc, err := rightNode.Client() + if err != nil { + t.Fatal(err) + } + + var leftAddr string + err = leftRpc.Call(&leftAddr, "pss_baseAddr") + if err != nil { + t.Fatal(err) + } + + var rightAddr string + err = rightRpc.Call(&rightAddr, "pss_baseAddr") + if err != nil { + t.Fatal(err) + } + + var leftPub string + err = leftRpc.Call(&leftPub, "pss_getPublicKey") + if err != nil { + t.Fatal(err) + } + + var rightPub string + err = rightRpc.Call(&rightPub, "pss_getPublicKey") + if err != nil { + t.Fatal(err) + } + + rsrcName := "foo.eth" + rsrcTopic := pss.BytesToTopic([]byte(rsrcName)) + + // wait for kademlia table to populate + time.Sleep(time.Second) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + rmsgC := make(chan *pss.APIMsg) + rightSub, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", controlTopic) + if err != nil { + t.Fatal(err) + } + defer rightSub.Unsubscribe() + + updateC := make(chan []byte) + updateMsg := []byte{} + ctrlClient := NewController(psses[rightPub]) + ctrlNotifier := NewController(psses[leftPub]) + ctrlNotifier.NewNotifier("foo.eth", 2, updateC) + + pubkeybytes, err := hexutil.Decode(leftPub) + if err != nil { + t.Fatal(err) + } + pubkey, err := crypto.UnmarshalPubkey(pubkeybytes) + if err != nil { + t.Fatal(err) + } + addrbytes, err := hexutil.Decode(leftAddr) + if err != nil { + t.Fatal(err) + } + ctrlClient.Subscribe(rsrcName, pubkey, addrbytes, func(s string, b []byte) error { + if s != "foo.eth" || !bytes.Equal(updateMsg, b) { + t.Fatalf("unexpected result in client handler: '%s':'%x'", s, b) + } + log.Info("client handler receive", "s", s, "b", b) + return nil + }) + + var inMsg *pss.APIMsg + select { + case inMsg = <-rmsgC: + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + + dMsg, err := NewMsgFromPayload(inMsg.Msg) + if err != nil { + t.Fatal(err) + } + if dMsg.namestring != rsrcName { + t.Fatalf("expected name '%s', got '%s'", rsrcName, dMsg.namestring) + } + if !bytes.Equal(dMsg.Payload[:len(updateMsg)], updateMsg) { + t.Fatalf("expected payload first %d bytes '%x', got '%x'", len(updateMsg), updateMsg, dMsg.Payload[:len(updateMsg)]) + } + if len(updateMsg)+symKeyLength != len(dMsg.Payload) { + t.Fatalf("expected payload length %d, have %d", len(updateMsg)+symKeyLength, len(dMsg.Payload)) + } + + rightSubUpdate, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", rsrcTopic) + if err != nil { + t.Fatal(err) + } + defer rightSubUpdate.Unsubscribe() + + updateMsg = []byte("plugh") + updateC <- updateMsg + select { + case inMsg = <-rmsgC: + case <-ctx.Done(): + log.Error("timed out waiting for msg", "topic", fmt.Sprintf("%x", rsrcTopic)) + t.Fatal(ctx.Err()) + 
} + dMsg, err = NewMsgFromPayload(inMsg.Msg) + if err != nil { + t.Fatal(err) + } + if dMsg.namestring != rsrcName { + t.Fatalf("expected name %s, got %s", rsrcName, dMsg.namestring) + } + if !bytes.Equal(dMsg.Payload, updateMsg) { + t.Fatalf("expected payload '%x', got '%x'", updateMsg, dMsg.Payload) + } + +} + +func newServices(allowRaw bool) adapters.Services { + stateStore := state.NewInmemoryStore() + kademlias := make(map[discover.NodeID]*network.Kademlia) + kademlia := func(id discover.NodeID) *network.Kademlia { + if k, ok := kademlias[id]; ok { + return k + } + addr := network.NewAddrFromNodeID(id) + params := network.NewKadParams() + params.MinProxBinSize = 2 + params.MaxBinSize = 3 + params.MinBinSize = 1 + params.MaxRetries = 1000 + params.RetryExponent = 2 + params.RetryInterval = 1000000 + kademlias[id] = network.NewKademlia(addr.Over(), params) + return kademlias[id] + } + return adapters.Services{ + "pss": func(ctx *adapters.ServiceContext) (node.Service, error) { + ctxlocal, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + keys, err := wapi.NewKeyPair(ctxlocal) + privkey, err := w.GetPrivateKey(keys) + pssp := pss.NewPssParams().WithPrivateKey(privkey) + pssp.MsgTTL = time.Second * 30 + pssp.AllowRaw = allowRaw + pskad := kademlia(ctx.Config.ID) + ps, err := pss.NewPss(pskad, pssp) + if err != nil { + return nil, err + } + //psses[common.ToHex(crypto.FromECDSAPub(&privkey.PublicKey))] = ps + psses[hexutil.Encode(crypto.FromECDSAPub(&privkey.PublicKey))] = ps + return ps, nil + }, + "bzz": func(ctx *adapters.ServiceContext) (node.Service, error) { + addr := network.NewAddrFromNodeID(ctx.Config.ID) + hp := network.NewHiveParams() + hp.Discovery = false + config := &network.BzzConfig{ + OverlayAddr: addr.Over(), + UnderlayAddr: addr.Under(), + HiveParams: hp, + } + return network.NewBzz(config, kademlia(ctx.Config.ID), stateStore, nil, nil), nil + }, + } +} diff --git a/swarm/pss/ping.go b/swarm/pss/ping.go index 2ef0729187a9..ff635f40a444 100644 --- a/swarm/pss/ping.go +++ b/swarm/pss/ping.go @@ -19,6 +19,7 @@ package pss import ( + "context" "errors" "time" @@ -40,7 +41,7 @@ type Ping struct { InC chan bool // optional, report back to calling code } -func (p *Ping) pingHandler(msg interface{}) error { +func (p *Ping) pingHandler(ctx context.Context, msg interface{}) error { var pingmsg *PingMsg var ok bool if pingmsg, ok = msg.(*PingMsg); !ok { @@ -80,7 +81,7 @@ func NewPingProtocol(ping *Ping) *p2p.Protocol { for { select { case ispong := <-ping.OutC: - pp.Send(&PingMsg{ + pp.Send(context.TODO(), &PingMsg{ Created: time.Now(), Pong: ispong, }) diff --git a/swarm/pss/protocol.go b/swarm/pss/protocol.go index bf23e49dafad..5fcae090efb9 100644 --- a/swarm/pss/protocol.go +++ b/swarm/pss/protocol.go @@ -172,6 +172,8 @@ func (p *Protocol) Handle(msg []byte, peer *p2p.Peer, asymmetric bool, keyid str rw, err := p.AddPeer(peer, *p.topic, asymmetric, keyid) if err != nil { return err + } else if rw == nil { + return fmt.Errorf("handle called on nil MsgReadWriter for new key " + keyid) } vrw = rw.(*PssReadWriter) } @@ -181,8 +183,14 @@ func (p *Protocol) Handle(msg []byte, peer *p2p.Peer, asymmetric bool, keyid str return fmt.Errorf("could not decode pssmsg") } if asymmetric { + if p.pubKeyRWPool[keyid] == nil { + return fmt.Errorf("handle called on nil MsgReadWriter for key " + keyid) + } vrw = p.pubKeyRWPool[keyid].(*PssReadWriter) } else { + if p.symKeyRWPool[keyid] == nil { + return fmt.Errorf("handle called on nil MsgReadWriter for key " + 
keyid) + } vrw = p.symKeyRWPool[keyid].(*PssReadWriter) } vrw.injectMsg(pmsg) diff --git a/swarm/pss/pss.go b/swarm/pss/pss.go index 77191b25a012..8459211ddb1a 100644 --- a/swarm/pss/pss.go +++ b/swarm/pss/pss.go @@ -18,6 +18,7 @@ package pss import ( "bytes" + "context" "crypto/ecdsa" "crypto/rand" "errors" @@ -41,7 +42,7 @@ import ( const ( defaultPaddingByteSize = 16 - defaultMsgTTL = time.Second * 120 + DefaultMsgTTL = time.Second * 120 defaultDigestCacheTTL = time.Second * 10 defaultSymKeyCacheCapacity = 512 digestLength = 32 // byte length of digest used for pss cache (currently same as swarm chunk hash) @@ -71,7 +72,7 @@ type senderPeer interface { Info() *p2p.PeerInfo ID() discover.NodeID Address() []byte - Send(interface{}) error + Send(context.Context, interface{}) error } // per-key peer related information @@ -94,7 +95,7 @@ type PssParams struct { // Sane defaults for Pss func NewPssParams() *PssParams { return &PssParams{ - MsgTTL: defaultMsgTTL, + MsgTTL: DefaultMsgTTL, CacheTTL: defaultDigestCacheTTL, SymKeyCacheCapacity: defaultSymKeyCacheCapacity, } @@ -231,12 +232,13 @@ func (p *Pss) Start(srv *p2p.Server) error { } } }() - log.Debug("Started pss", "public key", common.ToHex(crypto.FromECDSAPub(p.PublicKey()))) + log.Info("Started Pss") + log.Info("Loaded EC keys", "pubkey", common.ToHex(crypto.FromECDSAPub(p.PublicKey())), "secp256", common.ToHex(crypto.CompressPubkey(p.PublicKey()))) return nil } func (p *Pss) Stop() error { - log.Info("pss shutting down") + log.Info("Pss shutting down") close(p.quitC) return nil } @@ -344,7 +346,7 @@ func (p *Pss) getHandlers(topic Topic) map[*Handler]bool { // Check if address partially matches // If yes, it CAN be for us, and we process it // Only passes error to pss protocol handler if payload is not valid pssmsg -func (p *Pss) handlePssMsg(msg interface{}) error { +func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error { metrics.GetOrRegisterCounter("pss.handlepssmsg", nil).Inc(1) pssmsg, ok := msg.(*PssMsg) @@ -354,11 +356,11 @@ func (p *Pss) handlePssMsg(msg interface{}) error { } if int64(pssmsg.Expire) < time.Now().Unix() { metrics.GetOrRegisterCounter("pss.expire", nil).Inc(1) - log.Warn("pss filtered expired message", "from", fmt.Sprintf("%x", p.Overlay.BaseAddr()), "to", fmt.Sprintf("%x", common.ToHex(pssmsg.To))) + log.Warn("pss filtered expired message", "from", common.ToHex(p.Overlay.BaseAddr()), "to", common.ToHex(pssmsg.To)) return nil } if p.checkFwdCache(pssmsg) { - log.Trace(fmt.Sprintf("pss relay block-cache match (process): FROM %x TO %x", p.Overlay.BaseAddr(), common.ToHex(pssmsg.To))) + log.Trace("pss relay block-cache match (process)", "from", common.ToHex(p.Overlay.BaseAddr()), "to", (common.ToHex(pssmsg.To))) return nil } p.addFwdCache(pssmsg) @@ -480,7 +482,7 @@ func (p *Pss) SetPeerPublicKey(pubkey *ecdsa.PublicKey, topic Topic, address *Ps } // Automatically generate a new symkey for a topic and address hint -func (p *Pss) generateSymmetricKey(topic Topic, address *PssAddress, addToCache bool) (string, error) { +func (p *Pss) GenerateSymmetricKey(topic Topic, address *PssAddress, addToCache bool) (string, error) { keyid, err := p.w.GenerateSymKey() if err != nil { return "", err @@ -844,7 +846,7 @@ func (p *Pss) forward(msg *PssMsg) error { p.fwdPoolMu.RUnlock() // attempt to send the message - err := pp.Send(msg) + err := pp.Send(context.TODO(), msg) if err != nil { metrics.GetOrRegisterCounter("pss.pp.send.error", nil).Inc(1) log.Error(err.Error()) diff --git a/swarm/pss/pss_test.go 
b/swarm/pss/pss_test.go index a59a5e4270a4..41b03db28682 100644 --- a/swarm/pss/pss_test.go +++ b/swarm/pss/pss_test.go @@ -334,7 +334,7 @@ func TestHandlerConditions(t *testing.T) { Data: []byte{0x66, 0x6f, 0x6f}, }, } - if err := ps.handlePssMsg(msg); err != nil { + if err := ps.handlePssMsg(context.TODO(), msg); err != nil { t.Fatal(err.Error()) } tmr := time.NewTimer(time.Millisecond * 100) @@ -351,7 +351,7 @@ func TestHandlerConditions(t *testing.T) { // message should pass and queue due to partial length msg.To = addr[0:1] msg.Payload.Data = []byte{0x78, 0x79, 0x80, 0x80, 0x79} - if err := ps.handlePssMsg(msg); err != nil { + if err := ps.handlePssMsg(context.TODO(), msg); err != nil { t.Fatal(err.Error()) } tmr.Reset(time.Millisecond * 100) @@ -374,7 +374,7 @@ func TestHandlerConditions(t *testing.T) { // full address mismatch should put message in queue msg.To[0] = 0xff - if err := ps.handlePssMsg(msg); err != nil { + if err := ps.handlePssMsg(context.TODO(), msg); err != nil { t.Fatal(err.Error()) } tmr.Reset(time.Millisecond * 10) @@ -397,7 +397,7 @@ func TestHandlerConditions(t *testing.T) { // expired message should be dropped msg.Expire = uint32(time.Now().Add(-time.Second).Unix()) - if err := ps.handlePssMsg(msg); err != nil { + if err := ps.handlePssMsg(context.TODO(), msg); err != nil { t.Fatal(err.Error()) } tmr.Reset(time.Millisecond * 10) @@ -417,7 +417,7 @@ func TestHandlerConditions(t *testing.T) { }{ pssMsg: &PssMsg{}, } - if err := ps.handlePssMsg(fckedupmsg); err == nil { + if err := ps.handlePssMsg(context.TODO(), fckedupmsg); err == nil { t.Fatalf("expected error from processMsg but error nil") } @@ -427,7 +427,7 @@ func TestHandlerConditions(t *testing.T) { ps.outbox <- msg } msg.Payload.Data = []byte{0x62, 0x61, 0x72} - err = ps.handlePssMsg(msg) + err = ps.handlePssMsg(context.TODO(), msg) if err == nil { t.Fatal("expected error when mailbox full, but was nil") } @@ -470,7 +470,7 @@ func TestKeys(t *testing.T) { } // make a symmetric key that we will send to peer for encrypting messages to us - inkeyid, err := ps.generateSymmetricKey(topicobj, &addr, true) + inkeyid, err := ps.GenerateSymmetricKey(topicobj, &addr, true) if err != nil { t.Fatalf("failed to set 'our' incoming symmetric key") } @@ -1296,7 +1296,7 @@ func benchmarkSymKeySend(b *testing.B) { topic := BytesToTopic([]byte("foo")) to := make(PssAddress, 32) copy(to[:], network.RandomAddr().Over()) - symkeyid, err := ps.generateSymmetricKey(topic, &to, true) + symkeyid, err := ps.GenerateSymmetricKey(topic, &to, true) if err != nil { b.Fatalf("could not generate symkey: %v", err) } @@ -1389,7 +1389,7 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) { for i := 0; i < int(keycount); i++ { to := make(PssAddress, 32) copy(to[:], network.RandomAddr().Over()) - keyid, err = ps.generateSymmetricKey(topic, &to, true) + keyid, err = ps.GenerateSymmetricKey(topic, &to, true) if err != nil { b.Fatalf("cant generate symkey #%d: %v", i, err) } @@ -1471,7 +1471,7 @@ func benchmarkSymkeyBruteforceSameaddr(b *testing.B) { topic := BytesToTopic([]byte("foo")) for i := 0; i < int(keycount); i++ { copy(addr[i], network.RandomAddr().Over()) - keyid, err = ps.generateSymmetricKey(topic, &addr[i], true) + keyid, err = ps.GenerateSymmetricKey(topic, &addr[i], true) if err != nil { b.Fatalf("cant generate symkey #%d: %v", i, err) } diff --git a/swarm/sctx/sctx.go b/swarm/sctx/sctx.go new file mode 100644 index 000000000000..bed2b1145821 --- /dev/null +++ b/swarm/sctx/sctx.go @@ -0,0 +1,22 @@ +package sctx + +import 
"context" + +type ContextKey int + +const ( + HTTPRequestIDKey ContextKey = iota + requestHostKey +) + +func SetHost(ctx context.Context, domain string) context.Context { + return context.WithValue(ctx, requestHostKey, domain) +} + +func GetHost(ctx context.Context) string { + v, ok := ctx.Value(requestHostKey).(string) + if ok { + return v + } + return "" +} diff --git a/swarm/spancontext/spancontext.go b/swarm/spancontext/spancontext.go new file mode 100644 index 000000000000..2cb9f82f7c9e --- /dev/null +++ b/swarm/spancontext/spancontext.go @@ -0,0 +1,49 @@ +package spancontext + +import ( + "context" + + opentracing "github.com/opentracing/opentracing-go" +) + +func WithContext(ctx context.Context, sctx opentracing.SpanContext) context.Context { + return context.WithValue(ctx, "span_context", sctx) +} + +func FromContext(ctx context.Context) opentracing.SpanContext { + sctx, ok := ctx.Value("span_context").(opentracing.SpanContext) + if ok { + return sctx + } + + return nil +} + +func StartSpan(ctx context.Context, name string) (context.Context, opentracing.Span) { + tracer := opentracing.GlobalTracer() + + sctx := FromContext(ctx) + + var sp opentracing.Span + if sctx != nil { + sp = tracer.StartSpan( + name, + opentracing.ChildOf(sctx)) + } else { + sp = tracer.StartSpan(name) + } + + nctx := context.WithValue(ctx, "span_context", sp.Context()) + + return nctx, sp +} + +func StartSpanFrom(name string, sctx opentracing.SpanContext) opentracing.Span { + tracer := opentracing.GlobalTracer() + + sp := tracer.StartSpan( + name, + opentracing.ChildOf(sctx)) + + return sp +} diff --git a/swarm/storage/chunker.go b/swarm/storage/chunker.go index 5780742e38a4..6d805b8e2903 100644 --- a/swarm/storage/chunker.go +++ b/swarm/storage/chunker.go @@ -16,6 +16,7 @@ package storage import ( + "context" "encoding/binary" "errors" "fmt" @@ -24,7 +25,11 @@ import ( "time" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/spancontext" + opentracing "github.com/opentracing/opentracing-go" + olog "github.com/opentracing/opentracing-go/log" ) /* @@ -65,10 +70,6 @@ var ( errOperationTimedOut = errors.New("operation timed out") ) -const ( - DefaultChunkSize int64 = 4096 -) - type ChunkerParams struct { chunkSize int64 hashSize int64 @@ -92,9 +93,12 @@ type JoinerParams struct { getter Getter // TODO: there is a bug, so depth can only be 0 today, see: https://github.com/ethersphere/go-ethereum/issues/344 depth int + ctx context.Context } type TreeChunker struct { + ctx context.Context + branches int64 hashFunc SwarmHasher dataSize int64 @@ -126,29 +130,30 @@ type TreeChunker struct { The chunks are not meant to be validated by the chunker when joining. This is because it is left to the DPA to decide which sources are trusted. 
*/ -func TreeJoin(addr Address, getter Getter, depth int) *LazyChunkReader { +func TreeJoin(ctx context.Context, addr Address, getter Getter, depth int) *LazyChunkReader { jp := &JoinerParams{ ChunkerParams: ChunkerParams{ - chunkSize: DefaultChunkSize, + chunkSize: chunk.DefaultSize, hashSize: int64(len(addr)), }, addr: addr, getter: getter, depth: depth, + ctx: ctx, } - return NewTreeJoiner(jp).Join() + return NewTreeJoiner(jp).Join(ctx) } /* When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Key), the root hash of the entire content will fill this once processing finishes. New chunks to store are store using the putter which the caller provides. */ -func TreeSplit(data io.Reader, size int64, putter Putter) (k Address, wait func(), err error) { +func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) (k Address, wait func(context.Context) error, err error) { tsp := &TreeSplitterParams{ SplitterParams: SplitterParams{ ChunkerParams: ChunkerParams{ - chunkSize: DefaultChunkSize, + chunkSize: chunk.DefaultSize, hashSize: putter.RefSize(), }, reader: data, @@ -156,7 +161,7 @@ func TreeSplit(data io.Reader, size int64, putter Putter) (k Address, wait func( }, size: size, } - return NewTreeSplitter(tsp).Split() + return NewTreeSplitter(tsp).Split(ctx) } func NewTreeJoiner(params *JoinerParams) *TreeChunker { @@ -173,6 +178,8 @@ func NewTreeJoiner(params *JoinerParams) *TreeChunker { tc.errC = make(chan error) tc.quitC = make(chan bool) + tc.ctx = params.ctx + return tc } @@ -224,7 +231,7 @@ func (tc *TreeChunker) decrementWorkerCount() { tc.workerCount -= 1 } -func (tc *TreeChunker) Split() (k Address, wait func(), err error) { +func (tc *TreeChunker) Split(ctx context.Context) (k Address, wait func(context.Context) error, err error) { if tc.chunkSize <= 0 { panic("chunker must be initialised") } @@ -350,7 +357,7 @@ func (tc *TreeChunker) runWorker() { return } - h, err := tc.putter.Put(job.chunk) + h, err := tc.putter.Put(tc.ctx, job.chunk) if err != nil { tc.errC <- err return @@ -370,6 +377,7 @@ func (tc *TreeChunker) Append() (Address, func(), error) { // LazyChunkReader implements LazySectionReader type LazyChunkReader struct { + Ctx context.Context key Address // root key chunkData ChunkData off int64 // offset @@ -380,7 +388,7 @@ type LazyChunkReader struct { getter Getter } -func (tc *TreeChunker) Join() *LazyChunkReader { +func (tc *TreeChunker) Join(ctx context.Context) *LazyChunkReader { return &LazyChunkReader{ key: tc.addr, chunkSize: tc.chunkSize, @@ -388,16 +396,28 @@ func (tc *TreeChunker) Join() *LazyChunkReader { hashSize: tc.hashSize, depth: tc.depth, getter: tc.getter, + Ctx: tc.ctx, } } +func (r *LazyChunkReader) Context() context.Context { + return r.Ctx +} + // Size is meant to be called on the LazySectionReader -func (r *LazyChunkReader) Size(quitC chan bool) (n int64, err error) { +func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, err error) { metrics.GetOrRegisterCounter("lazychunkreader.size", nil).Inc(1) + var sp opentracing.Span + var cctx context.Context + cctx, sp = spancontext.StartSpan( + ctx, + "lcr.size") + defer sp.Finish() + log.Debug("lazychunkreader.size", "key", r.key) if r.chunkData == nil { - chunkData, err := r.getter.Get(Reference(r.key)) + chunkData, err := r.getter.Get(cctx, Reference(r.key)) if err != nil { return 0, err } @@ -420,12 +440,25 @@ func (r *LazyChunkReader) Size(quitC chan bool) (n int64, err error) { func (r *LazyChunkReader) ReadAt(b 
[]byte, off int64) (read int, err error) { metrics.GetOrRegisterCounter("lazychunkreader.readat", nil).Inc(1) + var sp opentracing.Span + var cctx context.Context + cctx, sp = spancontext.StartSpan( + r.Ctx, + "lcr.read") + defer sp.Finish() + + defer func() { + sp.LogFields( + olog.Int("off", int(off)), + olog.Int("read", read)) + }() + // this is correct, a swarm doc cannot be zero length, so no EOF is expected if len(b) == 0 { return 0, nil } quitC := make(chan bool) - size, err := r.Size(quitC) + size, err := r.Size(cctx, quitC) if err != nil { log.Error("lazychunkreader.readat.size", "size", size, "err", err) return 0, err @@ -448,7 +481,7 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) { length *= r.chunkSize } wg.Add(1) - go r.join(b, off, off+length, depth, treeSize/r.branches, r.chunkData, &wg, errC, quitC) + go r.join(cctx, b, off, off+length, depth, treeSize/r.branches, r.chunkData, &wg, errC, quitC) go func() { wg.Wait() close(errC) @@ -466,7 +499,7 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) { return len(b), nil } -func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeSize int64, chunkData ChunkData, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) { +func (r *LazyChunkReader) join(ctx context.Context, b []byte, off int64, eoff int64, depth int, treeSize int64, chunkData ChunkData, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) { defer parentWg.Done() // find appropriate block level for chunkData.Size() < treeSize && depth > r.depth { @@ -513,7 +546,7 @@ func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeS wg.Add(1) go func(j int64) { childKey := chunkData[8+j*r.hashSize : 8+(j+1)*r.hashSize] - chunkData, err := r.getter.Get(Reference(childKey)) + chunkData, err := r.getter.Get(ctx, Reference(childKey)) if err != nil { log.Error("lazychunkreader.join", "key", fmt.Sprintf("%x", childKey), "err", err) select { @@ -532,7 +565,7 @@ func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeS if soff < off { soff = off } - r.join(b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/r.branches, chunkData, wg, errC, quitC) + r.join(ctx, b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/r.branches, chunkData, wg, errC, quitC) }(i) } //for } @@ -569,7 +602,7 @@ func (r *LazyChunkReader) Seek(offset int64, whence int) (int64, error) { offset += r.off case 2: if r.chunkData == nil { //seek from the end requires rootchunk for size. 
call Size first - _, err := r.Size(nil) + _, err := r.Size(context.TODO(), nil) if err != nil { return 0, fmt.Errorf("can't get size: %v", err) } diff --git a/swarm/storage/chunker_test.go b/swarm/storage/chunker_test.go index d8be13ef6bb2..dbcc8700d17b 100644 --- a/swarm/storage/chunker_test.go +++ b/swarm/storage/chunker_test.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "crypto/rand" "encoding/binary" "errors" @@ -49,11 +50,11 @@ type fakeChunkStore struct { } // Put doesn't store anything it is just here to implement ChunkStore -func (f *fakeChunkStore) Put(*Chunk) { +func (f *fakeChunkStore) Put(context.Context, *Chunk) { } // Gut doesn't store anything it is just here to implement ChunkStore -func (f *fakeChunkStore) Get(Address) (*Chunk, error) { +func (f *fakeChunkStore) Get(context.Context, Address) (*Chunk, error) { return nil, errors.New("FakeChunkStore doesn't support Get") } @@ -81,7 +82,7 @@ func testRandomBrokenData(n int, tester *chunkerTester) { putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash) expectedError := fmt.Errorf("Broken reader") - addr, _, err := TreeSplit(brokendata, int64(n), putGetter) + addr, _, err := TreeSplit(context.TODO(), brokendata, int64(n), putGetter) if err == nil || err.Error() != expectedError.Error() { tester.t.Fatalf("Not receiving the correct error! Expected %v, received %v", expectedError, err) } @@ -104,20 +105,24 @@ func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester) putGetter := newTestHasherStore(NewMapChunkStore(), hash) var addr Address - var wait func() + var wait func(context.Context) error var err error + ctx := context.TODO() if usePyramid { - addr, wait, err = PyramidSplit(data, putGetter, putGetter) + addr, wait, err = PyramidSplit(ctx, data, putGetter, putGetter) } else { - addr, wait, err = TreeSplit(data, int64(n), putGetter) + addr, wait, err = TreeSplit(ctx, data, int64(n), putGetter) } if err != nil { tester.t.Fatalf(err.Error()) } tester.t.Logf(" Key = %v\n", addr) - wait() + err = wait(ctx) + if err != nil { + tester.t.Fatalf(err.Error()) + } - reader := TreeJoin(addr, putGetter, 0) + reader := TreeJoin(context.TODO(), addr, putGetter, 0) output := make([]byte, n) r, err := reader.Read(output) if r != n || err != io.EOF { @@ -200,11 +205,15 @@ func TestDataAppend(t *testing.T) { chunkStore := NewMapChunkStore() putGetter := newTestHasherStore(chunkStore, SHA3Hash) - addr, wait, err := PyramidSplit(data, putGetter, putGetter) + ctx := context.TODO() + addr, wait, err := PyramidSplit(ctx, data, putGetter, putGetter) + if err != nil { + tester.t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { tester.t.Fatalf(err.Error()) } - wait() //create a append data stream appendInput, found := tester.inputs[uint64(m)] @@ -217,13 +226,16 @@ func TestDataAppend(t *testing.T) { } putGetter = newTestHasherStore(chunkStore, SHA3Hash) - newAddr, wait, err := PyramidAppend(addr, appendData, putGetter, putGetter) + newAddr, wait, err := PyramidAppend(ctx, addr, appendData, putGetter, putGetter) + if err != nil { + tester.t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { tester.t.Fatalf(err.Error()) } - wait() - reader := TreeJoin(newAddr, putGetter, 0) + reader := TreeJoin(ctx, newAddr, putGetter, 0) newOutput := make([]byte, n+m) r, err := reader.Read(newOutput) if r != (n + m) { @@ -269,7 +281,7 @@ func TestRandomBrokenData(t *testing.T) { } func benchReadAll(reader LazySectionReader) { - size, _ := reader.Size(nil) + size, _ := reader.Size(context.TODO(), nil) 
output := make([]byte, 1000) for pos := int64(0); pos < size; pos += 1000 { reader.ReadAt(output, pos) @@ -282,12 +294,16 @@ func benchmarkSplitJoin(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash) - key, wait, err := PyramidSplit(data, putGetter, putGetter) + ctx := context.TODO() + key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter) if err != nil { t.Fatalf(err.Error()) } - wait() - reader := TreeJoin(key, putGetter, 0) + err = wait(ctx) + if err != nil { + t.Fatalf(err.Error()) + } + reader := TreeJoin(ctx, key, putGetter, 0) benchReadAll(reader) } } @@ -298,7 +314,7 @@ func benchmarkSplitTreeSHA3(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, SHA3Hash) - _, _, err := TreeSplit(data, int64(n), putGetter) + _, _, err := TreeSplit(context.TODO(), data, int64(n), putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -311,7 +327,7 @@ func benchmarkSplitTreeBMT(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, BMTHash) - _, _, err := TreeSplit(data, int64(n), putGetter) + _, _, err := TreeSplit(context.TODO(), data, int64(n), putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -324,7 +340,7 @@ func benchmarkSplitPyramidSHA3(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, SHA3Hash) - _, _, err := PyramidSplit(data, putGetter, putGetter) + _, _, err := PyramidSplit(context.TODO(), data, putGetter, putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -338,7 +354,7 @@ func benchmarkSplitPyramidBMT(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, BMTHash) - _, _, err := PyramidSplit(data, putGetter, putGetter) + _, _, err := PyramidSplit(context.TODO(), data, putGetter, putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -354,18 +370,25 @@ func benchmarkSplitAppendPyramid(n, m int, t *testing.B) { chunkStore := NewMapChunkStore() putGetter := newTestHasherStore(chunkStore, SHA3Hash) - key, wait, err := PyramidSplit(data, putGetter, putGetter) + ctx := context.TODO() + key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter) + if err != nil { + t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { t.Fatalf(err.Error()) } - wait() putGetter = newTestHasherStore(chunkStore, SHA3Hash) - _, wait, err = PyramidAppend(key, data1, putGetter, putGetter) + _, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter) + if err != nil { + t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { t.Fatalf(err.Error()) } - wait() } } diff --git a/swarm/storage/chunkstore.go b/swarm/storage/chunkstore.go index ce95cd971f4e..3b4d97a7a763 100644 --- a/swarm/storage/chunkstore.go +++ b/swarm/storage/chunkstore.go @@ -16,7 +16,10 @@ package storage -import "sync" +import ( + "context" + "sync" +) /* ChunkStore interface is implemented by : @@ -28,8 +31,8 @@ ChunkStore interface is implemented by : - FakeChunkStore: dummy store which doesn't store anything just implements the interface */ type ChunkStore interface { - Put(*Chunk) // effectively there is no error even if there is an error - Get(Address) (*Chunk, error) + Put(context.Context, *Chunk) // effectively there is no error even if there is an error + Get(context.Context, Address) (*Chunk, error) Close() } @@ -45,14 +48,14 @@ func NewMapChunkStore() *MapChunkStore { } } -func (m *MapChunkStore) Put(chunk *Chunk) { +func (m *MapChunkStore) Put(ctx 
context.Context, chunk *Chunk) { m.mu.Lock() defer m.mu.Unlock() m.chunks[chunk.Addr.Hex()] = chunk chunk.markAsStored() } -func (m *MapChunkStore) Get(addr Address) (*Chunk, error) { +func (m *MapChunkStore) Get(ctx context.Context, addr Address) (*Chunk, error) { m.mu.RLock() defer m.mu.RUnlock() chunk := m.chunks[addr.Hex()] diff --git a/swarm/storage/common.go b/swarm/storage/common.go index d86cb691448e..d6352820eb92 100644 --- a/swarm/storage/common.go +++ b/swarm/storage/common.go @@ -16,6 +16,7 @@ package storage import ( + "context" "sync" "github.com/ethereum/go-ethereum/swarm/log" @@ -37,7 +38,7 @@ func PutChunks(store *LocalStore, chunks ...*Chunk) { } }() for _, c := range chunks { - go store.Put(c) + go store.Put(context.TODO(), c) } wg.Wait() } diff --git a/swarm/storage/common_test.go b/swarm/storage/common_test.go index c6e97d68fcea..dc1a3ab35f77 100644 --- a/swarm/storage/common_test.go +++ b/swarm/storage/common_test.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "crypto/rand" "flag" "fmt" @@ -69,7 +70,7 @@ func mput(store ChunkStore, processors int, n int, f func(i int64) *Chunk) (hs [ for chunk := range c { wg.Add(1) chunk := chunk - store.Put(chunk) + store.Put(context.TODO(), chunk) go func() { defer wg.Done() <-chunk.dbStoredC @@ -103,7 +104,7 @@ func mget(store ChunkStore, hs []Address, f func(h Address, chunk *Chunk) error) for _, k := range hs { go func(h Address) { defer wg.Done() - chunk, err := store.Get(h) + chunk, err := store.Get(context.TODO(), h) if err != nil { errc <- err return diff --git a/swarm/storage/dbapi.go b/swarm/storage/dbapi.go index 24234b031772..dd71752eb2fe 100644 --- a/swarm/storage/dbapi.go +++ b/swarm/storage/dbapi.go @@ -16,6 +16,8 @@ package storage +import "context" + // wrapper of db-s to provide mockable custom local chunk store access to syncer type DBAPI struct { db *LDBStore @@ -27,8 +29,8 @@ func NewDBAPI(loc *LocalStore) *DBAPI { } // to obtain the chunks from address or request db entry only -func (d *DBAPI) Get(addr Address) (*Chunk, error) { - return d.loc.Get(addr) +func (d *DBAPI) Get(ctx context.Context, addr Address) (*Chunk, error) { + return d.loc.Get(ctx, addr) } // current storage counter of chunk db @@ -42,11 +44,11 @@ func (d *DBAPI) Iterator(from uint64, to uint64, po uint8, f func(Address, uint6 } // to obtain the chunks from address or request db entry only -func (d *DBAPI) GetOrCreateRequest(addr Address) (*Chunk, bool) { - return d.loc.GetOrCreateRequest(addr) +func (d *DBAPI) GetOrCreateRequest(ctx context.Context, addr Address) (*Chunk, bool) { + return d.loc.GetOrCreateRequest(ctx, addr) } // to obtain the chunks from key or request db entry only -func (d *DBAPI) Put(chunk *Chunk) { - d.loc.Put(chunk) +func (d *DBAPI) Put(ctx context.Context, chunk *Chunk) { + d.loc.Put(ctx, chunk) } diff --git a/swarm/storage/filestore.go b/swarm/storage/filestore.go index c0b463debdd1..2d8d82d95a50 100644 --- a/swarm/storage/filestore.go +++ b/swarm/storage/filestore.go @@ -17,6 +17,7 @@ package storage import ( + "context" "io" ) @@ -78,18 +79,18 @@ func NewFileStore(store ChunkStore, params *FileStoreParams) *FileStore { // Chunk retrieval blocks on netStore requests with a timeout so reader will // report error if retrieval of chunks within requested range time out. 
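As a usage illustration of the FileStore API reworked in the next hunk: Store now takes a context and returns a wait function that blocks until every chunk is persisted, and Retrieve takes a context and returns the lazy reader plus an encryption flag. A minimal sketch, assuming an in-memory MapChunkStore is an acceptable backing store; `storeAndFetch` is a hypothetical helper.

package example

import (
	"bytes"
	"context"
	"io"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

// storeAndFetch round-trips an unencrypted document through FileStore.
func storeAndFetch(ctx context.Context, doc []byte) ([]byte, error) {
	fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())

	addr, wait, err := fileStore.Store(ctx, bytes.NewReader(doc), int64(len(doc)), false)
	if err != nil {
		return nil, err
	}
	// Store is asynchronous; wait blocks until all chunks reach the chunk store.
	if err := wait(ctx); err != nil {
		return nil, err
	}

	reader, _ := fileStore.Retrieve(ctx, addr)
	out := make([]byte, len(doc))
	if _, err := reader.ReadAt(out, 0); err != nil && err != io.EOF {
		return nil, err
	}
	return out, nil
}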
// It returns a reader with the chunk data and whether the content was encrypted -func (f *FileStore) Retrieve(addr Address) (reader *LazyChunkReader, isEncrypted bool) { +func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChunkReader, isEncrypted bool) { isEncrypted = len(addr) > f.hashFunc().Size() getter := NewHasherStore(f.ChunkStore, f.hashFunc, isEncrypted) - reader = TreeJoin(addr, getter, 0) + reader = TreeJoin(ctx, addr, getter, 0) return } // Public API. Main entry point for document storage directly. Used by the // FS-aware API and httpaccess -func (f *FileStore) Store(data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(), err error) { +func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(context.Context) error, err error) { putter := NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt) - return PyramidSplit(data, putter, putter) + return PyramidSplit(ctx, data, putter, putter) } func (f *FileStore) HashSize() int { diff --git a/swarm/storage/filestore_test.go b/swarm/storage/filestore_test.go index 1aaec5e5cc4b..f3f597255884 100644 --- a/swarm/storage/filestore_test.go +++ b/swarm/storage/filestore_test.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "io" "io/ioutil" "os" @@ -49,12 +50,16 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) { defer os.RemoveAll("/tmp/bzz") reader, slice := generateRandomData(testDataSize) - key, wait, err := fileStore.Store(reader, testDataSize, toEncrypt) + ctx := context.TODO() + key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt) if err != nil { t.Errorf("Store error: %v", err) } - wait() - resultReader, isEncrypted := fileStore.Retrieve(key) + err = wait(ctx) + if err != nil { + t.Fatalf("Store waitt error: %v", err.Error()) + } + resultReader, isEncrypted := fileStore.Retrieve(context.TODO(), key) if isEncrypted != toEncrypt { t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted) } @@ -72,7 +77,7 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) { ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666) ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666) localStore.memStore = NewMemStore(NewDefaultStoreParams(), db) - resultReader, isEncrypted = fileStore.Retrieve(key) + resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key) if isEncrypted != toEncrypt { t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted) } @@ -110,12 +115,16 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) { } fileStore := NewFileStore(localStore, NewFileStoreParams()) reader, slice := generateRandomData(testDataSize) - key, wait, err := fileStore.Store(reader, testDataSize, toEncrypt) + ctx := context.TODO() + key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt) + if err != nil { + t.Errorf("Store error: %v", err) + } + err = wait(ctx) if err != nil { t.Errorf("Store error: %v", err) } - wait() - resultReader, isEncrypted := fileStore.Retrieve(key) + resultReader, isEncrypted := fileStore.Retrieve(context.TODO(), key) if isEncrypted != toEncrypt { t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted) } @@ -134,7 +143,7 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) { memStore.setCapacity(0) // check whether it is, indeed, empty fileStore.ChunkStore = memStore - resultReader, isEncrypted = fileStore.Retrieve(key) + resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key) if isEncrypted != toEncrypt { 
t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted) } @@ -144,7 +153,7 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) { // check how it works with localStore fileStore.ChunkStore = localStore // localStore.dbStore.setCapacity(0) - resultReader, isEncrypted = fileStore.Retrieve(key) + resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key) if isEncrypted != toEncrypt { t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted) } diff --git a/swarm/storage/hasherstore.go b/swarm/storage/hasherstore.go index e659c3681ef6..bc23077c1832 100644 --- a/swarm/storage/hasherstore.go +++ b/swarm/storage/hasherstore.go @@ -17,10 +17,12 @@ package storage import ( + "context" "fmt" "sync" "github.com/ethereum/go-ethereum/crypto/sha3" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/storage/encryption" ) @@ -56,7 +58,7 @@ func NewHasherStore(chunkStore ChunkStore, hashFunc SwarmHasher, toEncrypt bool) refSize := int64(hashSize) if toEncrypt { refSize += encryption.KeyLength - chunkEncryption = newChunkEncryption(DefaultChunkSize, refSize) + chunkEncryption = newChunkEncryption(chunk.DefaultSize, refSize) } return &hasherStore{ @@ -73,7 +75,7 @@ func NewHasherStore(chunkStore ChunkStore, hashFunc SwarmHasher, toEncrypt bool) // Put stores the chunkData into the ChunkStore of the hasherStore and returns the reference. // If hasherStore has a chunkEncryption object, the data will be encrypted. // Asynchronous function, the data will not necessarily be stored when it returns. -func (h *hasherStore) Put(chunkData ChunkData) (Reference, error) { +func (h *hasherStore) Put(ctx context.Context, chunkData ChunkData) (Reference, error) { c := chunkData size := chunkData.Size() var encryptionKey encryption.Key @@ -86,7 +88,7 @@ func (h *hasherStore) Put(chunkData ChunkData) (Reference, error) { } chunk := h.createChunk(c, size) - h.storeChunk(chunk) + h.storeChunk(ctx, chunk) return Reference(append(chunk.Addr, encryptionKey...)), nil } @@ -94,14 +96,14 @@ func (h *hasherStore) Put(chunkData ChunkData) (Reference, error) { // Get returns data of the chunk with the given reference (retrieved from the ChunkStore of hasherStore). // If the data is encrypted and the reference contains an encryption key, it will be decrypted before // return. 
-func (h *hasherStore) Get(ref Reference) (ChunkData, error) { +func (h *hasherStore) Get(ctx context.Context, ref Reference) (ChunkData, error) { key, encryptionKey, err := parseReference(ref, h.hashSize) if err != nil { return nil, err } toDecrypt := (encryptionKey != nil) - chunk, err := h.store.Get(key) + chunk, err := h.store.Get(ctx, key) if err != nil { return nil, err } @@ -126,9 +128,10 @@ func (h *hasherStore) Close() { // Wait returns when // 1) the Close() function has been called and // 2) all the chunks which has been Put has been stored -func (h *hasherStore) Wait() { +func (h *hasherStore) Wait(ctx context.Context) error { <-h.closed h.wg.Wait() + return nil } func (h *hasherStore) createHash(chunkData ChunkData) Address { @@ -188,9 +191,9 @@ func (h *hasherStore) decryptChunkData(chunkData ChunkData, encryptionKey encryp // removing extra bytes which were just added for padding length := ChunkData(decryptedSpan).Size() - for length > DefaultChunkSize { - length = length + (DefaultChunkSize - 1) - length = length / DefaultChunkSize + for length > chunk.DefaultSize { + length = length + (chunk.DefaultSize - 1) + length = length / chunk.DefaultSize length *= h.refSize } @@ -205,13 +208,13 @@ func (h *hasherStore) RefSize() int64 { return h.refSize } -func (h *hasherStore) storeChunk(chunk *Chunk) { +func (h *hasherStore) storeChunk(ctx context.Context, chunk *Chunk) { h.wg.Add(1) go func() { <-chunk.dbStoredC h.wg.Done() }() - h.store.Put(chunk) + h.store.Put(ctx, chunk) } func parseReference(ref Reference, hashSize int) (Address, encryption.Key, error) { diff --git a/swarm/storage/hasherstore_test.go b/swarm/storage/hasherstore_test.go index ccb37524a01b..ddf1c39b083d 100644 --- a/swarm/storage/hasherstore_test.go +++ b/swarm/storage/hasherstore_test.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "testing" "github.com/ethereum/go-ethereum/swarm/storage/encryption" @@ -46,13 +47,13 @@ func TestHasherStore(t *testing.T) { // Put two random chunks into the hasherStore chunkData1 := GenerateRandomChunk(int64(tt.chunkLength)).SData - key1, err := hasherStore.Put(chunkData1) + key1, err := hasherStore.Put(context.TODO(), chunkData1) if err != nil { t.Fatalf("Expected no error got \"%v\"", err) } chunkData2 := GenerateRandomChunk(int64(tt.chunkLength)).SData - key2, err := hasherStore.Put(chunkData2) + key2, err := hasherStore.Put(context.TODO(), chunkData2) if err != nil { t.Fatalf("Expected no error got \"%v\"", err) } @@ -60,10 +61,13 @@ func TestHasherStore(t *testing.T) { hasherStore.Close() // Wait until chunks are really stored - hasherStore.Wait() + err = hasherStore.Wait(context.TODO()) + if err != nil { + t.Fatalf("Expected no error got \"%v\"", err) + } // Get the first chunk - retrievedChunkData1, err := hasherStore.Get(key1) + retrievedChunkData1, err := hasherStore.Get(context.TODO(), key1) if err != nil { t.Fatalf("Expected no error, got \"%v\"", err) } @@ -74,7 +78,7 @@ func TestHasherStore(t *testing.T) { } // Get the second chunk - retrievedChunkData2, err := hasherStore.Get(key2) + retrievedChunkData2, err := hasherStore.Get(context.TODO(), key2) if err != nil { t.Fatalf("Expected no error, got \"%v\"", err) } @@ -101,7 +105,7 @@ func TestHasherStore(t *testing.T) { } // Check if chunk data in store is encrypted or not - chunkInStore, err := chunkStore.Get(hash1) + chunkInStore, err := chunkStore.Get(context.TODO(), hash1) if err != nil { t.Fatalf("Expected no error got \"%v\"", err) } diff --git a/swarm/storage/ldbstore.go 
b/swarm/storage/ldbstore.go index 178b1ebc4089..b95aa13b09f0 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -25,6 +25,7 @@ package storage import ( "archive/tar" "bytes" + "context" "encoding/binary" "encoding/hex" "fmt" @@ -35,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage/mock" "github.com/syndtr/goleveldb/leveldb" @@ -370,7 +372,7 @@ func (s *LDBStore) Import(in io.Reader) (int64, error) { key := Address(keybytes) chunk := NewChunk(key, nil) chunk.SData = data[32:] - s.Put(chunk) + s.Put(context.TODO(), chunk) wg.Add(1) go func() { defer wg.Done() @@ -383,14 +385,13 @@ func (s *LDBStore) Import(in io.Reader) (int64, error) { } func (s *LDBStore) Cleanup() { - //Iterates over the database and checks that there are no faulty chunks + //Iterates over the database and checks that there are no chunks bigger than 4kb + var errorsFound, removed, total int + it := s.db.NewIterator() - startPosition := []byte{keyIndex} - it.Seek(startPosition) - var key []byte - var errorsFound, total int - for it.Valid() { - key = it.Key() + defer it.Release() + for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() { + key := it.Key() if (key == nil) || (key[0] != keyIndex) { break } @@ -398,27 +399,50 @@ func (s *LDBStore) Cleanup() { var index dpaDBIndex err := decodeIndex(it.Value(), &index) if err != nil { - it.Next() + log.Warn("Cannot decode") + errorsFound++ continue } - data, err := s.db.Get(getDataKey(index.Idx, s.po(Address(key[1:])))) + hash := key[1:] + po := s.po(hash) + datakey := getDataKey(index.Idx, po) + data, err := s.db.Get(datakey) if err != nil { - log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err)) - s.delete(index.Idx, getIndexKey(key[1:]), s.po(Address(key[1:]))) - errorsFound++ - } else { - hasher := s.hashfunc() - hasher.Write(data[32:]) - hash := hasher.Sum(nil) - if !bytes.Equal(hash, key[1:]) { - log.Warn(fmt.Sprintf("Found invalid chunk. Hash mismatch. hash=%x, key=%x", hash, key[:])) - s.delete(index.Idx, getIndexKey(key[1:]), s.po(Address(key[1:]))) + found := false + + // highest possible proximity is 255 + for po = 1; po <= 255; po++ { + datakey = getDataKey(index.Idx, po) + data, err = s.db.Get(datakey) + if err == nil { + found = true + break + } + } + + if !found { + log.Warn(fmt.Sprintf("Chunk %x found but count not be accessed with any po", key[:])) + errorsFound++ + continue } } - it.Next() + + c := &Chunk{} + ck := data[:32] + decodeData(data, c) + + cs := int64(binary.LittleEndian.Uint64(c.SData[:8])) + log.Trace("chunk", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.SData), "size", cs) + + if len(c.SData) > chunk.DefaultSize+8 { + log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.SData), "size", cs) + s.delete(index.Idx, getIndexKey(key[1:]), po) + removed++ + errorsFound++ + } } - it.Release() - log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total)) + + log.Warn(fmt.Sprintf("Found %v errors out of %v entries. 
Removed %v chunks.", errorsFound, total, removed)) } func (s *LDBStore) ReIndex() { @@ -499,7 +523,7 @@ func (s *LDBStore) CurrentStorageIndex() uint64 { return s.dataIdx } -func (s *LDBStore) Put(chunk *Chunk) { +func (s *LDBStore) Put(ctx context.Context, chunk *Chunk) { metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1) log.Trace("ldbstore.put", "key", chunk.Addr) @@ -639,7 +663,7 @@ func (s *LDBStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool { return true } -func (s *LDBStore) Get(addr Address) (chunk *Chunk, err error) { +func (s *LDBStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) { metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1) log.Trace("ldbstore.get", "key", addr) diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index 2c706a75bd31..5ee88baa5126 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "fmt" "io/ioutil" "os" @@ -26,6 +27,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage/mock/mem" @@ -59,12 +61,12 @@ func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) { } cleanup := func() { - if err != nil { + if db != nil { db.Close() } err = os.RemoveAll(dir) if err != nil { - panic("db cleanup failed") + panic(fmt.Sprintf("db cleanup failed: %v", err)) } } @@ -157,7 +159,7 @@ func testDbStoreNotFound(t *testing.T, mock bool) { t.Fatalf("init dbStore failed: %v", err) } - _, err = db.Get(ZeroAddr) + _, err = db.Get(context.TODO(), ZeroAddr) if err != ErrChunkNotFound { t.Errorf("Expected ErrChunkNotFound, got %v", err) } @@ -183,12 +185,12 @@ func testIterator(t *testing.T, mock bool) { t.Fatalf("init dbStore failed: %v", err) } - chunks := GenerateRandomChunks(DefaultChunkSize, chunkcount) + chunks := GenerateRandomChunks(chunk.DefaultSize, chunkcount) wg := &sync.WaitGroup{} wg.Add(len(chunks)) for i = 0; i < len(chunks); i++ { - db.Put(chunks[i]) + db.Put(context.TODO(), chunks[i]) chunkkeys[i] = chunks[i].Addr j := i go func() { @@ -293,13 +295,13 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) { chunks := []*Chunk{} for i := 0; i < n; i++ { - c := GenerateRandomChunk(DefaultChunkSize) + c := GenerateRandomChunk(chunk.DefaultSize) chunks = append(chunks, c) log.Trace("generate random chunk", "idx", i, "chunk", c) } for i := 0; i < n; i++ { - go ldb.Put(chunks[i]) + go ldb.Put(context.TODO(), chunks[i]) } // wait for all chunks to be stored @@ -310,7 +312,7 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) { log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) for i := 0; i < n; i++ { - ret, err := ldb.Get(chunks[i].Addr) + ret, err := ldb.Get(context.TODO(), chunks[i].Addr) if err != nil { t.Fatal(err) } @@ -343,13 +345,13 @@ func TestLDBStoreCollectGarbage(t *testing.T) { chunks := []*Chunk{} for i := 0; i < n; i++ { - c := GenerateRandomChunk(DefaultChunkSize) + c := GenerateRandomChunk(chunk.DefaultSize) chunks = append(chunks, c) log.Trace("generate random chunk", "idx", i, "chunk", c) } for i := 0; i < n; i++ { - ldb.Put(chunks[i]) + ldb.Put(context.TODO(), chunks[i]) } // wait for all chunks to be stored @@ -364,7 +366,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) { var missing int for i := 0; i < n; i++ { - ret, err := ldb.Get(chunks[i].Addr) + ret, err := 
ldb.Get(context.TODO(), chunks[i].Addr) if err == ErrChunkNotFound || err == ldberrors.ErrNotFound { missing++ continue @@ -397,13 +399,13 @@ func TestLDBStoreAddRemove(t *testing.T) { chunks := []*Chunk{} for i := 0; i < n; i++ { - c := GenerateRandomChunk(DefaultChunkSize) + c := GenerateRandomChunk(chunk.DefaultSize) chunks = append(chunks, c) log.Trace("generate random chunk", "idx", i, "chunk", c) } for i := 0; i < n; i++ { - go ldb.Put(chunks[i]) + go ldb.Put(context.TODO(), chunks[i]) } // wait for all chunks to be stored before continuing @@ -428,7 +430,7 @@ func TestLDBStoreAddRemove(t *testing.T) { log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) for i := 0; i < n; i++ { - ret, err := ldb.Get(chunks[i].Addr) + ret, err := ldb.Get(context.TODO(), chunks[i].Addr) if i%2 == 0 { // expect even chunks to be missing @@ -459,13 +461,13 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { chunks := []*Chunk{} for i := 0; i < capacity; i++ { - c := GenerateRandomChunk(DefaultChunkSize) + c := GenerateRandomChunk(chunk.DefaultSize) chunks = append(chunks, c) log.Trace("generate random chunk", "idx", i, "chunk", c) } for i := 0; i < n; i++ { - ldb.Put(chunks[i]) + ldb.Put(context.TODO(), chunks[i]) } // wait for all chunks to be stored before continuing @@ -494,7 +496,7 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { n = 10 for i := 0; i < n; i++ { - ldb.Put(chunks[i]) + ldb.Put(context.TODO(), chunks[i]) } // wait for all chunks to be stored before continuing @@ -504,14 +506,14 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { // expect for first chunk to be missing, because it has the smallest access value idx := 0 - ret, err := ldb.Get(chunks[idx].Addr) + ret, err := ldb.Get(context.TODO(), chunks[idx].Addr) if err == nil || ret != nil { t.Fatal("expected first chunk to be missing, but got no error") } // expect for last chunk to be present, as it has the largest access value idx = 9 - ret, err = ldb.Get(chunks[idx].Addr) + ret, err = ldb.Get(context.TODO(), chunks[idx].Addr) if err != nil { t.Fatalf("expected no error, but got %s", err) } diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go index 4c57086faf90..9e34749797c5 100644 --- a/swarm/storage/localstore.go +++ b/swarm/storage/localstore.go @@ -17,6 +17,7 @@ package storage import ( + "context" "encoding/binary" "fmt" "path/filepath" @@ -96,21 +97,17 @@ func NewTestLocalStoreForAddr(params *LocalStoreParams) (*LocalStore, error) { // when the chunk is stored in memstore. // After the LDBStore.Put, it is ensured that the MemStore // contains the chunk with the same data, but nil ReqC channel. -func (ls *LocalStore) Put(chunk *Chunk) { - if l := len(chunk.SData); l < 9 { - log.Debug("incomplete chunk data", "addr", chunk.Addr, "length", l) - chunk.SetErrored(ErrChunkInvalid) - chunk.markAsStored() - return - } +func (ls *LocalStore) Put(ctx context.Context, chunk *Chunk) { valid := true + // ls.Validators contains a list of one validator per chunk type. 
+ // if one validator succeeds, then the chunk is valid for _, v := range ls.Validators { if valid = v.Validate(chunk.Addr, chunk.SData); valid { break } } if !valid { - log.Trace("invalid content address", "addr", chunk.Addr) + log.Trace("invalid chunk", "addr", chunk.Addr, "len", len(chunk.SData)) chunk.SetErrored(ErrChunkInvalid) chunk.markAsStored() return @@ -123,7 +120,7 @@ func (ls *LocalStore) Put(chunk *Chunk) { chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) - memChunk, err := ls.memStore.Get(chunk.Addr) + memChunk, err := ls.memStore.Get(ctx, chunk.Addr) switch err { case nil: if memChunk.ReqC == nil { @@ -136,7 +133,7 @@ func (ls *LocalStore) Put(chunk *Chunk) { return } - ls.DbStore.Put(chunk) + ls.DbStore.Put(ctx, chunk) // chunk is no longer a request, but a chunk with data, so replace it in memStore newc := NewChunk(chunk.Addr, nil) @@ -144,7 +141,7 @@ func (ls *LocalStore) Put(chunk *Chunk) { newc.Size = chunk.Size newc.dbStoredC = chunk.dbStoredC - ls.memStore.Put(newc) + ls.memStore.Put(ctx, newc) if memChunk != nil && memChunk.ReqC != nil { close(memChunk.ReqC) @@ -155,15 +152,15 @@ func (ls *LocalStore) Put(chunk *Chunk) { // This method is blocking until the chunk is retrieved // so additional timeout may be needed to wrap this call if // ChunkStores are remote and can have long latency -func (ls *LocalStore) Get(addr Address) (chunk *Chunk, err error) { +func (ls *LocalStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) { ls.mu.Lock() defer ls.mu.Unlock() - return ls.get(addr) + return ls.get(ctx, addr) } -func (ls *LocalStore) get(addr Address) (chunk *Chunk, err error) { - chunk, err = ls.memStore.Get(addr) +func (ls *LocalStore) get(ctx context.Context, addr Address) (chunk *Chunk, err error) { + chunk, err = ls.memStore.Get(ctx, addr) if err == nil { if chunk.ReqC != nil { select { @@ -177,25 +174,25 @@ func (ls *LocalStore) get(addr Address) (chunk *Chunk, err error) { return } metrics.GetOrRegisterCounter("localstore.get.cachemiss", nil).Inc(1) - chunk, err = ls.DbStore.Get(addr) + chunk, err = ls.DbStore.Get(ctx, addr) if err != nil { metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1) return } chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) - ls.memStore.Put(chunk) + ls.memStore.Put(ctx, chunk) return } // retrieve logic common for local and network chunk retrieval requests -func (ls *LocalStore) GetOrCreateRequest(addr Address) (chunk *Chunk, created bool) { +func (ls *LocalStore) GetOrCreateRequest(ctx context.Context, addr Address) (chunk *Chunk, created bool) { metrics.GetOrRegisterCounter("localstore.getorcreaterequest", nil).Inc(1) ls.mu.Lock() defer ls.mu.Unlock() var err error - chunk, err = ls.get(addr) + chunk, err = ls.get(ctx, addr) if err == nil && chunk.GetErrored() == nil { metrics.GetOrRegisterCounter("localstore.getorcreaterequest.hit", nil).Inc(1) log.Trace(fmt.Sprintf("LocalStore.GetOrRetrieve: %v found locally", addr)) @@ -210,7 +207,7 @@ func (ls *LocalStore) GetOrCreateRequest(addr Address) (chunk *Chunk, created bo metrics.GetOrRegisterCounter("localstore.getorcreaterequest.miss", nil).Inc(1) log.Trace(fmt.Sprintf("LocalStore.GetOrRetrieve: %v not found locally. 
open new request", addr)) chunk = NewChunk(addr, make(chan bool)) - ls.memStore.Put(chunk) + ls.memStore.Put(ctx, chunk) return chunk, true } diff --git a/swarm/storage/localstore_test.go b/swarm/storage/localstore_test.go index 2bb81efa3a03..ae62218fe8ac 100644 --- a/swarm/storage/localstore_test.go +++ b/swarm/storage/localstore_test.go @@ -20,6 +20,8 @@ import ( "io/ioutil" "os" "testing" + + "github.com/ethereum/go-ethereum/swarm/chunk" ) var ( @@ -61,7 +63,7 @@ func TestValidator(t *testing.T) { // add content address validator and check puts // bad should fail, good should pass store.Validators = append(store.Validators, NewContentAddressValidator(hashfunc)) - chunks = GenerateRandomChunks(DefaultChunkSize, 2) + chunks = GenerateRandomChunks(chunk.DefaultSize, 2) goodChunk = chunks[0] badChunk = chunks[1] copy(badChunk.SData, goodChunk.SData) @@ -79,7 +81,7 @@ func TestValidator(t *testing.T) { var negV boolTestValidator store.Validators = append(store.Validators, negV) - chunks = GenerateRandomChunks(DefaultChunkSize, 2) + chunks = GenerateRandomChunks(chunk.DefaultSize, 2) goodChunk = chunks[0] badChunk = chunks[1] copy(badChunk.SData, goodChunk.SData) @@ -97,7 +99,7 @@ func TestValidator(t *testing.T) { var posV boolTestValidator = true store.Validators = append(store.Validators, posV) - chunks = GenerateRandomChunks(DefaultChunkSize, 2) + chunks = GenerateRandomChunks(chunk.DefaultSize, 2) goodChunk = chunks[0] badChunk = chunks[1] copy(badChunk.SData, goodChunk.SData) diff --git a/swarm/storage/memstore.go b/swarm/storage/memstore.go index 7af31ffbd15d..55cfcbfeaf41 100644 --- a/swarm/storage/memstore.go +++ b/swarm/storage/memstore.go @@ -19,6 +19,7 @@ package storage import ( + "context" "sync" lru "github.com/hashicorp/golang-lru" @@ -68,7 +69,7 @@ func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) { } } -func (m *MemStore) Get(addr Address) (*Chunk, error) { +func (m *MemStore) Get(ctx context.Context, addr Address) (*Chunk, error) { if m.disabled { return nil, ErrChunkNotFound } @@ -90,7 +91,7 @@ func (m *MemStore) Get(addr Address) (*Chunk, error) { return c.(*Chunk), nil } -func (m *MemStore) Put(c *Chunk) { +func (m *MemStore) Put(ctx context.Context, c *Chunk) { if m.disabled { return } diff --git a/swarm/storage/memstore_test.go b/swarm/storage/memstore_test.go index 5c68a4b4be6f..2c1b0e89e601 100644 --- a/swarm/storage/memstore_test.go +++ b/swarm/storage/memstore_test.go @@ -17,6 +17,7 @@ package storage import ( + "context" "crypto/rand" "encoding/binary" "io/ioutil" @@ -72,7 +73,7 @@ func TestMemStoreNotFound(t *testing.T) { m := newTestMemStore() defer m.Close() - _, err := m.Get(ZeroAddr) + _, err := m.Get(context.TODO(), ZeroAddr) if err != ErrChunkNotFound { t.Errorf("Expected ErrChunkNotFound, got %v", err) } @@ -187,8 +188,8 @@ func TestMemStoreAndLDBStore(t *testing.T) { } for i := 0; i < tt.n; i++ { - go ldb.Put(chunks[i]) - memStore.Put(chunks[i]) + go ldb.Put(context.TODO(), chunks[i]) + memStore.Put(context.TODO(), chunks[i]) if got := memStore.cache.Len(); got > cacheCap { t.Fatalf("expected to get cache capacity less than %v, but got %v", cacheCap, got) @@ -200,10 +201,10 @@ func TestMemStoreAndLDBStore(t *testing.T) { } for i := 0; i < tt.n; i++ { - _, err := memStore.Get(chunks[i].Addr) + _, err := memStore.Get(context.TODO(), chunks[i].Addr) if err != nil { if err == ErrChunkNotFound { - _, err := ldb.Get(chunks[i].Addr) + _, err := ldb.Get(context.TODO(), chunks[i].Addr) if err != nil { t.Fatalf("couldn't get chunk %v 
from ldb, got error: %v", i, err) } diff --git a/swarm/storage/mru/doc.go b/swarm/storage/mru/doc.go new file mode 100644 index 000000000000..e1d7c2c34309 --- /dev/null +++ b/swarm/storage/mru/doc.go @@ -0,0 +1,61 @@ +// Package mru defines Mutable resource updates. +// A Mutable Resource is an entity which allows updates to a resource +// without resorting to ENS on each update. +// The update scheme is built on swarm chunks with chunk keys following +// a predictable, versionable pattern. +// +// Updates are defined to be periodic in nature, where the update frequency +// is expressed in seconds. +// +// The root entry of a mutable resource is tied to a unique identifier that +// is deterministically generated out of the metadata content that describes +// the resource. This metadata includes a user-defined resource name, a resource +// start time that indicates when the resource becomes valid, +// the frequency in seconds with which the resource is expected to be updated, both of +// which are stored as little-endian uint64 values in the database (for a +// total of 16 bytes). It also contains the owner's address (ownerAddr) +// This MRU info is stored in a separate content-addressed chunk +// (call it the metadata chunk), with the following layout: +// +// (00|length|startTime|frequency|name|ownerAddr) +// +// (The two first zero-value bytes are used for disambiguation by the chunk validator, +// and update chunk will always have a value > 0 there.) +// +// Each metadata chunk is identified by its rootAddr, calculated as follows: +// metaHash=H(len(metadata), startTime, frequency,name) +// rootAddr = H(metaHash, ownerAddr). +// where H is the SHA3 hash function +// This scheme effectively locks the root chunk so that only the owner of the private key +// that ownerAddr was derived from can sign updates. +// +// The root entry tells the requester from when the mutable resource was +// first added (Unix time in seconds) and in which moments to look for the +// actual updates. Thus, a resource update for identifier "føø.bar" +// starting at unix time 1528800000 with frequency 300 (every 5 mins) will have updates on 1528800300, +// 1528800600, 1528800900 and so on. +// +// Actual data updates are also made in the form of swarm chunks. The keys +// of the updates are the hash of a concatenation of properties as follows: +// +// updateAddr = H(period, version, rootAddr) +// where H is the SHA3 hash function +// The period is (currentTime - startTime) / frequency +// +// Using our previous example, this means that a period 3 will happen when the +// clock hits 1528800900 +// +// If more than one update is made in the same period, incremental +// version numbers are used successively. +// +// A user looking up a resource would only need to know the rootAddr in order to get the versions +// +// the resource update data is: +// resourcedata = headerlength|period|version|rootAddr|flags|metaHash +// where flags is a 1-byte flags field. 
Flag 0 is set to 1 to indicate multihash +// +// the full update data that goes in the chunk payload is: +// resourcedata|sign(resourcedata) +// +// headerlength is a 16 bit value containing the byte length of period|version|rootAddr|flags|metaHash +package mru diff --git a/swarm/storage/mru/error.go b/swarm/storage/mru/error.go index bf33e6540bdf..18ab52558259 100644 --- a/swarm/storage/mru/error.go +++ b/swarm/storage/mru/error.go @@ -16,6 +16,10 @@ package mru +import ( + "fmt" +) + const ( ErrInit = iota ErrNotFound @@ -30,3 +34,40 @@ const ( ErrPeriodDepth ErrCnt ) + +// Error is a the typed error object used for Mutable Resources +type Error struct { + code int + err string +} + +// Error implements the error interface +func (e *Error) Error() string { + return e.err +} + +// Code returns the error code +// Error codes are enumerated in the error.go file within the mru package +func (e *Error) Code() int { + return e.code +} + +// NewError creates a new Mutable Resource Error object with the specified code and custom error message +func NewError(code int, s string) error { + if code < 0 || code >= ErrCnt { + panic("no such error code!") + } + r := &Error{ + err: s, + } + switch code { + case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData: + r.code = code + } + return r +} + +// NewErrorf is a convenience version of NewError that incorporates printf-style formatting +func NewErrorf(code int, format string, args ...interface{}) error { + return NewError(code, fmt.Sprintf(format, args...)) +} diff --git a/swarm/storage/mru/handler.go b/swarm/storage/mru/handler.go new file mode 100644 index 000000000000..57561fd14be2 --- /dev/null +++ b/swarm/storage/mru/handler.go @@ -0,0 +1,505 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
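For orientation, here is a minimal sketch of how the Handler API defined in this new file is typically set up and used to create a resource. netStore, signer and ctx stand in for a configured *storage.NetStore, an mru.Signer implementation and a context.Context; error handling is elided, and the exact orchestration in the surrounding swarm code may differ. Publishing and looking up updates are sketched further below, next to Handler.Update and Handler.Lookup.

	h := mru.NewHandler(&mru.HandlerParams{QueryMaxPeriods: 4})
	h.SetStore(netStore) // lookups and updates need a chunk store

	// describe the resource; the metadata chunk address (rootAddr) is derived from these fields
	req, _ := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
		Name:      "example.resource",
		Frequency: 300, // expected update interval, in seconds
		StartTime: mru.TimestampProvider.Now(),
		Owner:     signer.Address(),
	})

	// store the metadata chunk and register the resource in the handler's index
	_ = h.New(ctx, req)
	rootAddr := req.RootAddr() // published out of band (e.g. through ENS) so readers can find the resource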
+ +// Handler is the API for Mutable Resources +// It enables creating, updating, syncing and retrieving resources and their update data +package mru + +import ( + "bytes" + "context" + "sync" + "time" + "unsafe" + + "github.com/ethereum/go-ethereum/swarm/chunk" + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +type Handler struct { + chunkStore *storage.NetStore + HashSize int + resources map[uint64]*resource + resourceLock sync.RWMutex + storeTimeout time.Duration + queryMaxPeriods uint32 +} + +// HandlerParams pass parameters to the Handler constructor NewHandler +// Signer and TimestampProvider are mandatory parameters +type HandlerParams struct { + QueryMaxPeriods uint32 +} + +// hashPool contains a pool of ready hashers +var hashPool sync.Pool +var minimumChunkLength int + +// init initializes the package and hashPool +func init() { + hashPool = sync.Pool{ + New: func() interface{} { + return storage.MakeHashFunc(resourceHashAlgorithm)() + }, + } + if minimumMetadataLength < minimumUpdateDataLength { + minimumChunkLength = minimumMetadataLength + } else { + minimumChunkLength = minimumUpdateDataLength + } +} + +// NewHandler creates a new Mutable Resource API +func NewHandler(params *HandlerParams) *Handler { + rh := &Handler{ + resources: make(map[uint64]*resource), + storeTimeout: defaultStoreTimeout, + queryMaxPeriods: params.QueryMaxPeriods, + } + + for i := 0; i < hasherCount; i++ { + hashfunc := storage.MakeHashFunc(resourceHashAlgorithm)() + if rh.HashSize == 0 { + rh.HashSize = hashfunc.Size() + } + hashPool.Put(hashfunc) + } + + return rh +} + +// SetStore sets the store backend for the Mutable Resource API +func (h *Handler) SetStore(store *storage.NetStore) { + h.chunkStore = store +} + +// Validate is a chunk validation method +// If it looks like a resource update, the chunk address is checked against the ownerAddr of the update's signature +// It implements the storage.ChunkValidator interface +func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { + dataLength := len(data) + if dataLength < minimumChunkLength || dataLength > chunk.DefaultSize+8 { + return false + } + + //metadata chunks have the first two bytes set to zero + if data[0] == 0 && data[1] == 0 && dataLength >= minimumMetadataLength { + //metadata chunk + rootAddr, _ := metadataHash(data) + valid := bytes.Equal(chunkAddr, rootAddr) + if !valid { + log.Debug("Invalid root metadata chunk with address", "addr", chunkAddr.Hex()) + } + return valid + } + + // if it is not a metadata chunk, check if it is a properly formatted update chunk with + // valid signature and proof of ownership of the resource it is trying + // to update + + // First, deserialize the chunk + var r SignedResourceUpdate + if err := r.fromChunk(chunkAddr, data); err != nil { + log.Debug("Invalid resource chunk", "addr", chunkAddr.Hex(), "err", err.Error()) + return false + } + + // check that the lookup information contained in the chunk matches the updateAddr (chunk search key) + // that was used to retrieve this chunk + // if this validation fails, someone forged a chunk. 
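// (Illustrative aside, not part of the change: the check below recomputes the address
// an update chunk must have been stored under from its own header, essentially
//
//	buf := make([]byte, 4+4+len(rootAddr))
//	binary.LittleEndian.PutUint32(buf[:4], period)
//	binary.LittleEndian.PutUint32(buf[4:8], version)
//	copy(buf[8:], rootAddr)
//	hasher.Write(buf)
//	expectedAddr := hasher.Sum(nil)
//
// as implemented by UpdateLookup.UpdateAddr in lookup.go, and rejects the chunk if
// chunkAddr differs. Together with the signature check further down, this ties every
// update chunk to one specific period, version and resource owner.)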
+ if !bytes.Equal(chunkAddr, r.updateHeader.UpdateAddr()) { + log.Debug("period,version,rootAddr contained in update chunk do not match updateAddr", "addr", chunkAddr.Hex()) + return false + } + + // Verify signatures and that the signer actually owns the resource + // If it fails, it means either the signature is not valid, data is corrupted + // or someone is trying to update someone else's resource. + if err := r.Verify(); err != nil { + log.Debug("Invalid signature", "err", err) + return false + } + + return true +} + +// GetContent retrieves the data payload of the last synced update of the Mutable Resource +func (h *Handler) GetContent(rootAddr storage.Address) (storage.Address, []byte, error) { + rsrc := h.get(rootAddr) + if rsrc == nil || !rsrc.isSynced() { + return nil, nil, NewError(ErrNotFound, " does not exist or is not synced") + } + return rsrc.lastKey, rsrc.data, nil +} + +// GetLastPeriod retrieves the period of the last synced update of the Mutable Resource +func (h *Handler) GetLastPeriod(rootAddr storage.Address) (uint32, error) { + rsrc := h.get(rootAddr) + if rsrc == nil { + return 0, NewError(ErrNotFound, " does not exist") + } else if !rsrc.isSynced() { + return 0, NewError(ErrNotSynced, " is not synced") + } + return rsrc.period, nil +} + +// GetVersion retrieves the period of the last synced update of the Mutable Resource +func (h *Handler) GetVersion(rootAddr storage.Address) (uint32, error) { + rsrc := h.get(rootAddr) + if rsrc == nil { + return 0, NewError(ErrNotFound, " does not exist") + } else if !rsrc.isSynced() { + return 0, NewError(ErrNotSynced, " is not synced") + } + return rsrc.version, nil +} + +// New creates a new metadata chunk out of the request passed in. +func (h *Handler) New(ctx context.Context, request *Request) error { + + // frequency 0 is invalid + if request.metadata.Frequency == 0 { + return NewError(ErrInvalidValue, "frequency cannot be 0 when creating a resource") + } + + // make sure owner is set to something + if request.metadata.Owner == zeroAddr { + return NewError(ErrInvalidValue, "ownerAddr must be set to create a new metadata chunk") + } + + // create the meta chunk and store it in swarm + chunk, metaHash, err := request.metadata.newChunk() + if err != nil { + return err + } + if request.metaHash != nil && !bytes.Equal(request.metaHash, metaHash) || + request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Addr) { + return NewError(ErrInvalidValue, "metaHash in UpdateRequest does not match actual metadata") + } + + request.metaHash = metaHash + request.rootAddr = chunk.Addr + + h.chunkStore.Put(ctx, chunk) + log.Debug("new resource", "name", request.metadata.Name, "startTime", request.metadata.StartTime, "frequency", request.metadata.Frequency, "owner", request.metadata.Owner) + + // create the internal index for the resource and populate it with its metadata + rsrc := &resource{ + resourceUpdate: resourceUpdate{ + updateHeader: updateHeader{ + UpdateLookup: UpdateLookup{ + rootAddr: chunk.Addr, + }, + }, + }, + ResourceMetadata: request.metadata, + updated: time.Now(), + } + h.set(chunk.Addr, rsrc) + + return nil +} + +// NewUpdateRequest prepares an UpdateRequest structure with all the necessary information to +// just add the desired data and sign it. 
+// The resulting structure can then be signed and passed to Handler.Update to be verified and sent +func (h *Handler) NewUpdateRequest(ctx context.Context, rootAddr storage.Address) (updateRequest *Request, err error) { + + if rootAddr == nil { + return nil, NewError(ErrInvalidValue, "rootAddr cannot be nil") + } + + // Make sure we have a cache of the metadata chunk + rsrc, err := h.Load(ctx, rootAddr) + if err != nil { + return nil, err + } + + now := TimestampProvider.Now() + + updateRequest = new(Request) + updateRequest.period, err = getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency) + if err != nil { + return nil, err + } + + if _, err = h.lookup(rsrc, LookupLatestVersionInPeriod(rsrc.rootAddr, updateRequest.period)); err != nil { + if err.(*Error).code != ErrNotFound { + return nil, err + } + // not finding updates means that there is a network error + // or that the resource really does not have updates in this period. + } + + updateRequest.multihash = rsrc.multihash + updateRequest.rootAddr = rsrc.rootAddr + updateRequest.metaHash = rsrc.metaHash + updateRequest.metadata = rsrc.ResourceMetadata + + // if we already have an update for this period then increment version + // resource object MUST be in sync for version to be correct, but we checked this earlier in the method already + if h.hasUpdate(rootAddr, updateRequest.period) { + updateRequest.version = rsrc.version + 1 + } else { + updateRequest.version = 1 + } + + return updateRequest, nil +} + +// Lookup retrieves a specific or latest version of the resource update with metadata chunk at params.Root +// Lookup works differently depending on the configuration of `LookupParams` +// See the `LookupParams` documentation and helper functions: +// `LookupLatest`, `LookupLatestVersionInPeriod` and `LookupVersion` +// When looking for the latest update, it starts at the next period after the current time. +// upon failure tries the corresponding keys of each previous period until one is found +// (or startTime is reached, in which case there are no updates). +func (h *Handler) Lookup(ctx context.Context, params *LookupParams) (*resource, error) { + + rsrc := h.get(params.rootAddr) + if rsrc == nil { + return nil, NewError(ErrNothingToReturn, "resource not loaded") + } + return h.lookup(rsrc, params) +} + +// LookupPrevious returns the resource before the one currently loaded in the resource cache +// This is useful where resource updates are used incrementally in contrast to +// merely replacing content. +// Requires a cached resource object to determine the current state of the resource. 
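// (Illustrative aside: Lookup above is normally driven through the helper constructors
// defined in lookup.go. A sketch of the three common query shapes, once the metadata
// chunk has been cached with h.Load(ctx, rootAddr); error handling elided:
//
//	rsrc, _ := h.Lookup(ctx, LookupLatest(rootAddr))                  // walk back from the current period
//	rsrc, _ = h.Lookup(ctx, LookupLatestVersionInPeriod(rootAddr, 7)) // newest version within period 7
//	rsrc, _ = h.Lookup(ctx, LookupVersion(rootAddr, 7, 2))            // exactly period 7, version 2
//
// After a successful lookup the update payload is available through GetContent(rootAddr).)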
+func (h *Handler) LookupPrevious(ctx context.Context, params *LookupParams) (*resource, error) { + rsrc := h.get(params.rootAddr) + if rsrc == nil { + return nil, NewError(ErrNothingToReturn, "resource not loaded") + } + if !rsrc.isSynced() { + return nil, NewError(ErrNotSynced, "LookupPrevious requires synced resource.") + } else if rsrc.period == 0 { + return nil, NewError(ErrNothingToReturn, " not found") + } + var version, period uint32 + if rsrc.version > 1 { + version = rsrc.version - 1 + period = rsrc.period + } else if rsrc.period == 1 { + return nil, NewError(ErrNothingToReturn, "Current update is the oldest") + } else { + version = 0 + period = rsrc.period - 1 + } + return h.lookup(rsrc, NewLookupParams(rsrc.rootAddr, period, version, params.Limit)) +} + +// base code for public lookup methods +func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error) { + + lp := *params + // we can't look for anything without a store + if h.chunkStore == nil { + return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups") + } + + var specificperiod bool + if lp.period > 0 { + specificperiod = true + } else { + // get the current time and the next period + now := TimestampProvider.Now() + + var period uint32 + period, err := getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency) + if err != nil { + return nil, err + } + lp.period = period + } + + // start from the last possible period, and iterate previous ones + // (unless we want a specific period only) until we find a match. + // If we hit startTime we're out of options + var specificversion bool + if lp.version > 0 { + specificversion = true + } else { + lp.version = 1 + } + + var hops uint32 + if lp.Limit == 0 { + lp.Limit = h.queryMaxPeriods + } + log.Trace("resource lookup", "period", lp.period, "version", lp.version, "limit", lp.Limit) + for lp.period > 0 { + if lp.Limit != 0 && hops > lp.Limit { + return nil, NewErrorf(ErrPeriodDepth, "Lookup exceeded max period hops (%d)", lp.Limit) + } + updateAddr := lp.UpdateAddr() + chunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout) + if err == nil { + if specificversion { + return h.updateIndex(rsrc, chunk) + } + // check if we have versions > 1. If a version fails, the previous version is used and returned. 
+ log.Trace("rsrc update version 1 found, checking for version updates", "period", lp.period, "updateAddr", updateAddr) + for { + newversion := lp.version + 1 + updateAddr := lp.UpdateAddr() + newchunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout) + if err != nil { + return h.updateIndex(rsrc, chunk) + } + chunk = newchunk + lp.version = newversion + log.Trace("version update found, checking next", "version", lp.version, "period", lp.period, "updateAddr", updateAddr) + } + } + if specificperiod { + break + } + log.Trace("rsrc update not found, checking previous period", "period", lp.period, "updateAddr", updateAddr) + lp.period-- + hops++ + } + return nil, NewError(ErrNotFound, "no updates found") +} + +// Load retrieves the Mutable Resource metadata chunk stored at rootAddr +// Upon retrieval it creates/updates the index entry for it with metadata corresponding to the chunk contents +func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource, error) { + chunk, err := h.chunkStore.GetWithTimeout(ctx, rootAddr, defaultRetrieveTimeout) + if err != nil { + return nil, NewError(ErrNotFound, err.Error()) + } + + // create the index entry + rsrc := &resource{} + + if err := rsrc.ResourceMetadata.binaryGet(chunk.SData); err != nil { // Will fail if this is not really a metadata chunk + return nil, err + } + + rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.SData) + if !bytes.Equal(rsrc.rootAddr, rootAddr) { + return nil, NewError(ErrCorruptData, "Corrupt metadata chunk") + } + h.set(rootAddr, rsrc) + log.Trace("resource index load", "rootkey", rootAddr, "name", rsrc.ResourceMetadata.Name, "starttime", rsrc.ResourceMetadata.StartTime, "frequency", rsrc.ResourceMetadata.Frequency) + return rsrc, nil +} + +// update mutable resource index map with specified content +func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, error) { + + // retrieve metadata from chunk data and check that it matches this mutable resource + var r SignedResourceUpdate + if err := r.fromChunk(chunk.Addr, chunk.SData); err != nil { + return nil, err + } + log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Addr, "period", r.period, "version", r.version) + + // update our rsrcs entry map + rsrc.lastKey = chunk.Addr + rsrc.period = r.period + rsrc.version = r.version + rsrc.updated = time.Now() + rsrc.data = make([]byte, len(r.data)) + rsrc.multihash = r.multihash + copy(rsrc.data, r.data) + rsrc.Reader = bytes.NewReader(rsrc.data) + log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Addr, "period", rsrc.period, "version", rsrc.version) + h.set(chunk.Addr, rsrc) + return rsrc, nil +} + +// Update adds an actual data update +// Uses the Mutable Resource metadata currently loaded in the resources map entry. +// It is the caller's responsibility to make sure that this data is not stale. +// Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit. +// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update +// on the network. 
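// (Illustrative aside: a typical update round-trip through the API below, assuming
// rootAddr identifies an existing resource and signer is an mru.Signer implementation;
// error handling elided:
//
//	req, _ := h.NewUpdateRequest(ctx, rootAddr) // loads the metadata chunk and picks the next period/version
//	req.SetData(payload, false)                 // false: raw payload rather than a multihash
//	_ = req.Sign(signer)                        // signs the serialized update
//	updateAddr, _ := h.Update(ctx, &req.SignedResourceUpdate)
//
// The serialized update, header and signature included, must fit in a single chunk.)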
+func (h *Handler) Update(ctx context.Context, r *SignedResourceUpdate) (storage.Address, error) { + return h.update(ctx, r) +} + +// create and commit an update +func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAddr storage.Address, err error) { + + // we can't update anything without a store + if h.chunkStore == nil { + return nil, NewError(ErrInit, "Call Handler.SetStore() before updating") + } + + rsrc := h.get(r.rootAddr) + if rsrc != nil && rsrc.period != 0 && rsrc.version != 0 && // This is the only cheap check we can do for sure + rsrc.period == r.period && rsrc.version >= r.version { // without having to lookup update chunks + + return nil, NewError(ErrInvalidValue, "A former update in this period is already known to exist") + } + + chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big + if err != nil { + return nil, err + } + + // send the chunk + h.chunkStore.Put(ctx, chunk) + log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.SData, "multihash", r.multihash) + + // update our resources map entry if the new update is older than the one we have, if we have it. + if rsrc != nil && (r.period > rsrc.period || (rsrc.period == r.period && r.version > rsrc.version)) { + rsrc.period = r.period + rsrc.version = r.version + rsrc.data = make([]byte, len(r.data)) + rsrc.updated = time.Now() + rsrc.lastKey = r.updateAddr + rsrc.multihash = r.multihash + copy(rsrc.data, r.data) + rsrc.Reader = bytes.NewReader(rsrc.data) + } + return r.updateAddr, nil +} + +// Retrieves the resource index value for the given nameHash +func (h *Handler) get(rootAddr storage.Address) *resource { + if len(rootAddr) < storage.KeyLength { + log.Warn("Handler.get with invalid rootAddr") + return nil + } + hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0])) + h.resourceLock.RLock() + defer h.resourceLock.RUnlock() + rsrc := h.resources[hashKey] + return rsrc +} + +// Sets the resource index value for the given nameHash +func (h *Handler) set(rootAddr storage.Address, rsrc *resource) { + if len(rootAddr) < storage.KeyLength { + log.Warn("Handler.set with invalid rootAddr") + return + } + hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0])) + h.resourceLock.Lock() + defer h.resourceLock.Unlock() + h.resources[hashKey] = rsrc +} + +// Checks if we already have an update on this resource, according to the value in the current state of the resource index +func (h *Handler) hasUpdate(rootAddr storage.Address, period uint32) bool { + rsrc := h.get(rootAddr) + return rsrc != nil && rsrc.period == period +} diff --git a/swarm/storage/mru/lookup.go b/swarm/storage/mru/lookup.go new file mode 100644 index 000000000000..eb28336e124c --- /dev/null +++ b/swarm/storage/mru/lookup.go @@ -0,0 +1,117 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package mru + +import ( + "encoding/binary" + "hash" + + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// LookupParams is used to specify constraints when performing an update lookup +// Limit defines whether or not the lookup should be limited +// If Limit is set to true then Max defines the amount of hops that can be performed +type LookupParams struct { + UpdateLookup + Limit uint32 +} + +// RootAddr returns the metadata chunk address +func (r *LookupParams) RootAddr() storage.Address { + return r.rootAddr +} + +func NewLookupParams(rootAddr storage.Address, period, version uint32, limit uint32) *LookupParams { + return &LookupParams{ + UpdateLookup: UpdateLookup{ + period: period, + version: version, + rootAddr: rootAddr, + }, + Limit: limit, + } +} + +// LookupLatest generates lookup parameters that look for the latest version of a resource +func LookupLatest(rootAddr storage.Address) *LookupParams { + return NewLookupParams(rootAddr, 0, 0, 0) +} + +// LookupLatestVersionInPeriod generates lookup parameters that look for the latest version of a resource in a given period +func LookupLatestVersionInPeriod(rootAddr storage.Address, period uint32) *LookupParams { + return NewLookupParams(rootAddr, period, 0, 0) +} + +// LookupVersion generates lookup parameters that look for a specific version of a resource +func LookupVersion(rootAddr storage.Address, period, version uint32) *LookupParams { + return NewLookupParams(rootAddr, period, version, 0) +} + +// UpdateLookup represents the components of a resource update search key +type UpdateLookup struct { + period uint32 + version uint32 + rootAddr storage.Address +} + +// 4 bytes period +// 4 bytes version +// storage.Keylength for rootAddr +const updateLookupLength = 4 + 4 + storage.KeyLength + +// UpdateAddr calculates the resource update chunk address corresponding to this lookup key +func (u *UpdateLookup) UpdateAddr() (updateAddr storage.Address) { + serializedData := make([]byte, updateLookupLength) + u.binaryPut(serializedData) + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(serializedData) + return hasher.Sum(nil) +} + +// binaryPut serializes this UpdateLookup instance into the provided slice +func (u *UpdateLookup) binaryPut(serializedData []byte) error { + if len(serializedData) != updateLookupLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData)) + } + if len(u.rootAddr) != storage.KeyLength { + return NewError(ErrInvalidValue, "UpdateLookup.binaryPut called without rootAddr set") + } + binary.LittleEndian.PutUint32(serializedData[:4], u.period) + binary.LittleEndian.PutUint32(serializedData[4:8], u.version) + copy(serializedData[8:], u.rootAddr[:]) + return nil +} + +// binaryLength returns the expected size of this structure when serialized +func (u *UpdateLookup) binaryLength() int { + return updateLookupLength +} + +// binaryGet restores the current instance from the information contained in the passed slice +func (u *UpdateLookup) binaryGet(serializedData []byte) error { + if len(serializedData) != updateLookupLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read UpdateLookup. 
Expected %d, got %d", updateLookupLength, len(serializedData)) + } + u.period = binary.LittleEndian.Uint32(serializedData[:4]) + u.version = binary.LittleEndian.Uint32(serializedData[4:8]) + u.rootAddr = storage.Address(make([]byte, storage.KeyLength)) + copy(u.rootAddr[:], serializedData[8:]) + return nil +} diff --git a/swarm/storage/mru/lookup_test.go b/swarm/storage/mru/lookup_test.go new file mode 100644 index 000000000000..b66b200a36d7 --- /dev/null +++ b/swarm/storage/mru/lookup_test.go @@ -0,0 +1,85 @@ +package mru + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +func getTestUpdateLookup() *UpdateLookup { + metadata := *getTestMetadata() + rootAddr, _, _, _ := metadata.serializeAndHash() + return &UpdateLookup{ + period: 79, + version: 2010, + rootAddr: rootAddr, + } +} + +func compareUpdateLookup(a, b *UpdateLookup) bool { + return a.version == b.version && + a.period == b.period && + bytes.Equal(a.rootAddr, b.rootAddr) +} + +func TestUpdateLookupUpdateAddr(t *testing.T) { + ul := getTestUpdateLookup() + updateAddr := ul.UpdateAddr() + compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8fbc8d4777ef6da790257eda80ab4321fabd08cbdbe67e4e3da6caca386d64e0") +} + +func TestUpdateLookupSerializer(t *testing.T) { + serializedUpdateLookup := make([]byte, updateLookupLength) + ul := getTestUpdateLookup() + if err := ul.binaryPut(serializedUpdateLookup); err != nil { + t.Fatal(err) + } + compareByteSliceToExpectedHex(t, "serializedUpdateLookup", serializedUpdateLookup, "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb") + + // set receiving slice to the wrong size + serializedUpdateLookup = make([]byte, updateLookupLength+7) + if err := ul.binaryPut(serializedUpdateLookup); err == nil { + t.Fatalf("Expected UpdateLookup.binaryPut to fail when receiving slice has a length != %d", updateLookupLength) + } + + // set rootAddr to an invalid length + ul.rootAddr = []byte{1, 2, 3, 4} + serializedUpdateLookup = make([]byte, updateLookupLength) + if err := ul.binaryPut(serializedUpdateLookup); err == nil { + t.Fatal("Expected UpdateLookup.binaryPut to fail when rootAddr is not of the correct size") + } +} + +func TestUpdateLookupDeserializer(t *testing.T) { + serializedUpdateLookup, _ := hexutil.Decode("0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb") + var recoveredUpdateLookup UpdateLookup + if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil { + t.Fatal(err) + } + originalUpdateLookup := *getTestUpdateLookup() + if !compareUpdateLookup(&originalUpdateLookup, &recoveredUpdateLookup) { + t.Fatalf("Expected recovered UpdateLookup to match") + } + + // set source slice to the wrong size + serializedUpdateLookup = make([]byte, updateLookupLength+4) + if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err == nil { + t.Fatalf("Expected UpdateLookup.binaryGet to fail when source slice has a length != %d", updateLookupLength) + } +} + +func TestUpdateLookupSerializeDeserialize(t *testing.T) { + serializedUpdateLookup := make([]byte, updateLookupLength) + originalUpdateLookup := getTestUpdateLookup() + if err := originalUpdateLookup.binaryPut(serializedUpdateLookup); err != nil { + t.Fatal(err) + } + var recoveredUpdateLookup UpdateLookup + if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil { + t.Fatal(err) + } + if !compareUpdateLookup(originalUpdateLookup, &recoveredUpdateLookup) { + t.Fatalf("Expected 
recovered UpdateLookup to match") + } +} diff --git a/swarm/storage/mru/metadata.go b/swarm/storage/mru/metadata.go new file mode 100644 index 000000000000..0ab0ed1d9e4a --- /dev/null +++ b/swarm/storage/mru/metadata.go @@ -0,0 +1,189 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package mru + +import ( + "encoding/binary" + "hash" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// ResourceMetadata encapsulates the immutable information about a mutable resource :) +// once serialized into a chunk, the resource can be retrieved by knowing its content-addressed rootAddr +type ResourceMetadata struct { + StartTime Timestamp // time at which the resource starts to be valid + Frequency uint64 // expected update frequency for the resource + Name string // name of the resource, for the reference of the user or to disambiguate resources with same starttime, frequency, owneraddr + Owner common.Address // public address of the resource owner +} + +const frequencyLength = 8 // sizeof(uint64) +const nameLengthLength = 1 + +// Resource metadata chunk layout: +// 4 prefix bytes (chunkPrefixLength). The first two set to zero. The second two indicate the length +// Timestamp: timestampLength bytes +// frequency: frequencyLength bytes +// name length: nameLengthLength bytes +// name (variable length, can be empty, up to 255 bytes) +// ownerAddr: common.AddressLength +const minimumMetadataLength = chunkPrefixLength + timestampLength + frequencyLength + nameLengthLength + common.AddressLength + +// binaryGet populates the resource metadata from a byte array +func (r *ResourceMetadata) binaryGet(serializedData []byte) error { + if len(serializedData) < minimumMetadataLength { + return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short. Expected at least %d. Got %d.", minimumMetadataLength, len(serializedData)) + } + + // first two bytes must be set to zero to indicate metadata chunks, so enforce this. + if serializedData[0] != 0 || serializedData[1] != 0 { + return NewError(ErrCorruptData, "Invalid metadata chunk") + } + + cursor := 2 + metadataLength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])) // metadataLength does not include the 4 prefix bytes + if metadataLength+chunkPrefixLength != len(serializedData) { + return NewErrorf(ErrCorruptData, "Incorrect declared metadata length. 
Expected %d, got %d.", metadataLength+chunkPrefixLength, len(serializedData)) + } + + cursor += 2 + + if err := r.StartTime.binaryGet(serializedData[cursor : cursor+timestampLength]); err != nil { + return err + } + cursor += timestampLength + + r.Frequency = binary.LittleEndian.Uint64(serializedData[cursor : cursor+frequencyLength]) + cursor += frequencyLength + + nameLength := int(serializedData[cursor]) + if nameLength+minimumMetadataLength > len(serializedData) { + return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short when decoding resource name. Expected at least %d. Got %d.", nameLength+minimumMetadataLength, len(serializedData)) + } + cursor++ + r.Name = string(serializedData[cursor : cursor+nameLength]) + cursor += nameLength + + copy(r.Owner[:], serializedData[cursor:]) + cursor += common.AddressLength + if cursor != len(serializedData) { + return NewErrorf(ErrInvalidValue, "Metadata chunk has leftover data after deserialization. %d left to read", len(serializedData)-cursor) + } + return nil +} + +// binaryPut encodes the metadata into a byte array +func (r *ResourceMetadata) binaryPut(serializedData []byte) error { + metadataChunkLength := r.binaryLength() + if len(serializedData) != metadataChunkLength { + return NewErrorf(ErrInvalidValue, "Need a slice of exactly %d bytes to serialize this metadata, but got a slice of size %d.", metadataChunkLength, len(serializedData)) + } + + // root chunk has first two bytes both set to 0, which distinguishes from update bytes + // therefore, skip the first two bytes of a zero-initialized array. + cursor := 2 + binary.LittleEndian.PutUint16(serializedData[cursor:cursor+2], uint16(metadataChunkLength-chunkPrefixLength)) // metadataLength does not include the 4 prefix bytes + cursor += 2 + + r.StartTime.binaryPut(serializedData[cursor : cursor+timestampLength]) + cursor += timestampLength + + binary.LittleEndian.PutUint64(serializedData[cursor:cursor+frequencyLength], r.Frequency) + cursor += frequencyLength + + // Encode the name string as a 1 byte length followed by the encoded string. + // Longer strings will be truncated. + nameLength := len(r.Name) + if nameLength > 255 { + nameLength = 255 + } + serializedData[cursor] = uint8(nameLength) + cursor++ + copy(serializedData[cursor:cursor+nameLength], []byte(r.Name[:nameLength])) + cursor += nameLength + + copy(serializedData[cursor:cursor+common.AddressLength], r.Owner[:]) + cursor += common.AddressLength + + return nil +} + +func (r *ResourceMetadata) binaryLength() int { + return minimumMetadataLength + len(r.Name) +} + +// serializeAndHash returns the root chunk addr and metadata hash that help identify and ascertain ownership of this resource +// returns the serialized metadata as a byproduct of having to hash it. 
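// (Worked example: for the fixture in metadata_test.go, the serialized metadata produced
// by binaryPut above (and returned as chunkData by serializeAndHash below) is
//
//	0x00004f0010dd205b00000000100e0000000000002a776f726c64...781c
//
// which decodes, following the layout documented above binaryGet, as
//
//	00 00                    two zero bytes marking a metadata chunk
//	4f 00                    metadata length 79 (little-endian), excluding the 4 prefix bytes
//	10 dd 20 5b 00 00 00 00  StartTime = 1528880400 (little-endian uint64)
//	10 0e 00 00 00 00 00 00  Frequency = 3600 seconds
//	2a                       name length = 42
//	"world news report, every hour, on the hour"
//	876a...781c              20-byte owner address
//
// Hashing this chunkData as done in metadataHash yields the expectedMetaHash and
// expectedRootAddr constants asserted in TestMetadataSerializerDeserializer.)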
+func (r *ResourceMetadata) serializeAndHash() (rootAddr, metaHash []byte, chunkData []byte, err error) { + + chunkData = make([]byte, r.binaryLength()) + if err := r.binaryPut(chunkData); err != nil { + return nil, nil, nil, err + } + rootAddr, metaHash = metadataHash(chunkData) + return rootAddr, metaHash, chunkData, nil + +} + +// creates a metadata chunk out of a resourceMetadata structure +func (metadata *ResourceMetadata) newChunk() (chunk *storage.Chunk, metaHash []byte, err error) { + // the metadata chunk contains a timestamp of when the resource starts to be valid + // and also how frequently it is expected to be updated + // from this we know at what time we should look for updates, and how often + // it also contains the name of the resource, so we know what resource we are working with + + // the key (rootAddr) of the metadata chunk is content-addressed + // if it wasn't we couldn't replace it later + // resolving this relationship is left up to external agents (for example ENS) + rootAddr, metaHash, chunkData, err := metadata.serializeAndHash() + if err != nil { + return nil, nil, err + } + + // make the chunk and send it to swarm + chunk = storage.NewChunk(rootAddr, nil) + chunk.SData = chunkData + chunk.Size = int64(len(chunkData)) + + return chunk, metaHash, nil +} + +// metadataHash returns the metadata chunk root address and metadata hash +// that help identify and ascertain ownership of this resource +// We compute it as rootAddr = H(ownerAddr, H(metadata)) +// Where H() is SHA3 +// metadata are all the metadata fields, except ownerAddr +// ownerAddr is the public address of the resource owner +// Update chunks must carry a rootAddr reference and metaHash in order to be verified +// This way, a node that receives an update can check the signature, recover the public address +// and check the ownership by computing H(ownerAddr, metaHash) and comparing it to the rootAddr +// the resource is claiming to update without having to lookup the metadata chunk. +// see verifyResourceOwnerhsip in signedupdate.go +func metadataHash(chunkData []byte) (rootAddr, metaHash []byte) { + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(chunkData[:len(chunkData)-common.AddressLength]) + metaHash = hasher.Sum(nil) + hasher.Reset() + hasher.Write(metaHash) + hasher.Write(chunkData[len(chunkData)-common.AddressLength:]) + rootAddr = hasher.Sum(nil) + return +} diff --git a/swarm/storage/mru/metadata_test.go b/swarm/storage/mru/metadata_test.go new file mode 100644 index 000000000000..abbac6e3e441 --- /dev/null +++ b/swarm/storage/mru/metadata_test.go @@ -0,0 +1,126 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
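As an aside, the metadataHash scheme above is what allows a node to verify that an update really belongs to its claimed resource without fetching the metadata chunk: it recomputes the root address from the metaHash carried in the update header and the owner address recovered from the update's signature. A minimal sketch under those assumptions (metaHash, ownerAddr and rootAddr are placeholders; the actual check referenced above lives in signedupdate.go):

	hasher := hashPool.Get().(hash.Hash)
	defer hashPool.Put(hasher)
	hasher.Reset()
	hasher.Write(metaHash)     // H(metadata without the owner field), carried in the update header
	hasher.Write(ownerAddr[:]) // recovered from the update's signature
	if !bytes.Equal(hasher.Sum(nil), rootAddr) {
		// the signer does not own the resource this update claims to update
	}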
+package mru + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) { + if hexutil.Encode(actualValue) != expectedHex { + t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue)) + } +} + +func getTestMetadata() *ResourceMetadata { + return &ResourceMetadata{ + Name: "world news report, every hour, on the hour", + StartTime: Timestamp{ + Time: 1528880400, + }, + Frequency: 3600, + Owner: newCharlieSigner().Address(), + } +} + +func TestMetadataSerializerDeserializer(t *testing.T) { + metadata := *getTestMetadata() + + rootAddr, metaHash, chunkData, err := metadata.serializeAndHash() // creates hashes and marshals, in one go + if err != nil { + t.Fatal(err) + } + const expectedRootAddr = "0xfb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb" + const expectedMetaHash = "0xf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0" + const expectedChunkData = "0x00004f0010dd205b00000000100e0000000000002a776f726c64206e657773207265706f72742c20657665727920686f75722c206f6e2074686520686f7572876a8936a7cd0b79ef0735ad0896c1afe278781c" + + compareByteSliceToExpectedHex(t, "rootAddr", rootAddr, expectedRootAddr) + compareByteSliceToExpectedHex(t, "metaHash", metaHash, expectedMetaHash) + compareByteSliceToExpectedHex(t, "chunkData", chunkData, expectedChunkData) + + recoveredMetadata := ResourceMetadata{} + recoveredMetadata.binaryGet(chunkData) + + if recoveredMetadata != metadata { + t.Fatalf("Expected that the recovered metadata equals the marshalled metadata") + } + + // we are going to mess with the data, so create a backup to go back to it for the next test + backup := make([]byte, len(chunkData)) + copy(backup, chunkData) + + chunkData = []byte{1, 2, 3} + if err := recoveredMetadata.binaryGet(chunkData); err == nil { + t.Fatal("Expected binaryGet to fail since chunk is too small") + } + + // restore backup + chunkData = make([]byte, len(backup)) + copy(chunkData, backup) + + // mess with the prefix so it is not zero + chunkData[0] = 7 + chunkData[1] = 9 + + if err := recoveredMetadata.binaryGet(chunkData); err == nil { + t.Fatal("Expected binaryGet to fail since prefix bytes are not zero") + } + + // restore backup + chunkData = make([]byte, len(backup)) + copy(chunkData, backup) + + // mess with the length header to trigger an error + chunkData[2] = 255 + chunkData[3] = 44 + if err := recoveredMetadata.binaryGet(chunkData); err == nil { + t.Fatal("Expected binaryGet to fail since header length does not match") + } + + // restore backup + chunkData = make([]byte, len(backup)) + copy(chunkData, backup) + + // mess with name length header to trigger a chunk too short error + chunkData[20] = 255 + if err := recoveredMetadata.binaryGet(chunkData); err == nil { + t.Fatal("Expected binaryGet to fail since name length is incorrect") + } + + // restore backup + chunkData = make([]byte, len(backup)) + copy(chunkData, backup) + + // mess with name length header to trigger an leftover bytes to read error + chunkData[20] = 3 + if err := recoveredMetadata.binaryGet(chunkData); err == nil { + t.Fatal("Expected binaryGet to fail since name length is too small") + } +} + +func TestMetadataSerializerLengthCheck(t *testing.T) { + metadata := *getTestMetadata() + + // make a slice that is too small to contain the metadata + serializedMetadata := make([]byte, 4) + + if err := 
metadata.binaryPut(serializedMetadata); err == nil { + t.Fatal("Expected metadata.binaryPut to fail, since target slice is too small") + } + +} diff --git a/swarm/storage/mru/request.go b/swarm/storage/mru/request.go new file mode 100644 index 000000000000..dd71f855d684 --- /dev/null +++ b/swarm/storage/mru/request.go @@ -0,0 +1,297 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package mru + +import ( + "bytes" + "encoding/json" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// updateRequestJSON represents a JSON-serialized UpdateRequest +type updateRequestJSON struct { + Name string `json:"name,omitempty"` + Frequency uint64 `json:"frequency,omitempty"` + StartTime uint64 `json:"startTime,omitempty"` + Owner string `json:"ownerAddr,omitempty"` + RootAddr string `json:"rootAddr,omitempty"` + MetaHash string `json:"metaHash,omitempty"` + Version uint32 `json:"version,omitempty"` + Period uint32 `json:"period,omitempty"` + Data string `json:"data,omitempty"` + Multihash bool `json:"multiHash"` + Signature string `json:"signature,omitempty"` +} + +// Request represents an update and/or resource create message +type Request struct { + SignedResourceUpdate + metadata ResourceMetadata + isNew bool +} + +var zeroAddr = common.Address{} + +// NewCreateUpdateRequest returns a ready to sign request to create and initialize a resource with data +func NewCreateUpdateRequest(metadata *ResourceMetadata) (*Request, error) { + + request, err := NewCreateRequest(metadata) + if err != nil { + return nil, err + } + + // get the current time + now := TimestampProvider.Now().Time + + request.version = 1 + request.period, err = getNextPeriod(metadata.StartTime.Time, now, metadata.Frequency) + if err != nil { + return nil, err + } + return request, nil +} + +// NewCreateRequest returns a request to create a new resource +func NewCreateRequest(metadata *ResourceMetadata) (request *Request, err error) { + if metadata.StartTime.Time == 0 { // get the current time + metadata.StartTime = TimestampProvider.Now() + } + + if metadata.Owner == zeroAddr { + return nil, NewError(ErrInvalidValue, "OwnerAddr is not set") + } + + request = &Request{ + metadata: *metadata, + } + request.rootAddr, request.metaHash, _, err = request.metadata.serializeAndHash() + request.isNew = true + return request, nil +} + +// Frequency returns the resource's expected update frequency +func (r *Request) Frequency() uint64 { + return r.metadata.Frequency +} + +// Name returns the resource human-readable name +func (r *Request) Name() string { + return r.metadata.Name +} + +// Multihash returns true if the resource data should be interpreted as a multihash +func (r *Request) Multihash() bool { + 
return r.multihash +} + +// Period returns in which period the resource will be published +func (r *Request) Period() uint32 { + return r.period +} + +// Version returns the resource version to publish +func (r *Request) Version() uint32 { + return r.version +} + +// RootAddr returns the metadata chunk address +func (r *Request) RootAddr() storage.Address { + return r.rootAddr +} + +// StartTime returns the time that the resource was/will be created at +func (r *Request) StartTime() Timestamp { + return r.metadata.StartTime +} + +// Owner returns the resource owner's address +func (r *Request) Owner() common.Address { + return r.metadata.Owner +} + +// Sign executes the signature to validate the resource and sets the owner address field +func (r *Request) Sign(signer Signer) error { + if r.metadata.Owner != zeroAddr && r.metadata.Owner != signer.Address() { + return NewError(ErrInvalidSignature, "Signer does not match current owner of the resource") + } + + if err := r.SignedResourceUpdate.Sign(signer); err != nil { + return err + } + r.metadata.Owner = signer.Address() + return nil +} + +// SetData stores the payload data the resource will be updated with +func (r *Request) SetData(data []byte, multihash bool) { + r.data = data + r.multihash = multihash + r.signature = nil + if !r.isNew { + r.metadata.Frequency = 0 // mark as update + } +} + +func (r *Request) IsNew() bool { + return r.metadata.Frequency > 0 && (r.period <= 1 || r.version <= 1) +} + +func (r *Request) IsUpdate() bool { + return r.signature != nil +} + +// fromJSON takes an update request JSON and populates an UpdateRequest +func (r *Request) fromJSON(j *updateRequestJSON) error { + + r.version = j.Version + r.period = j.Period + r.multihash = j.Multihash + r.metadata.Name = j.Name + r.metadata.Frequency = j.Frequency + r.metadata.StartTime.Time = j.StartTime + + if err := decodeHexArray(r.metadata.Owner[:], j.Owner, "ownerAddr"); err != nil { + return err + } + + var err error + if j.Data != "" { + r.data, err = hexutil.Decode(j.Data) + if err != nil { + return NewError(ErrInvalidValue, "Cannot decode data") + } + } + + var declaredRootAddr storage.Address + var declaredMetaHash []byte + + declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.KeyLength, "rootAddr") + if err != nil { + return err + } + declaredMetaHash, err = decodeHexSlice(j.MetaHash, 32, "metaHash") + if err != nil { + return err + } + + if r.IsNew() { + // for new resource creation, rootAddr and metaHash are optional because + // we can derive them from the content itself. + // however, if the user sent them, we check them for consistency. 
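(Editorial aside, not part of the patch itself.) The Request type above is the client-facing entry point of the new API: a caller builds it from ResourceMetadata, attaches a payload with SetData, signs it, and ships it as JSON. A minimal sketch of that flow, assuming a Signer obtained elsewhere (for instance via the NewGenericSigner constructor added in resource_sign.go) and the package-level TimestampProvider already configured:

// buildSignedCreateRequest sketches the intended client-side flow for the
// Request type: create, attach data, sign, and JSON-encode for the wire.
func buildSignedCreateRequest(signer Signer, payload []byte) ([]byte, error) {
	metadata := &ResourceMetadata{
		Name:      "example.resource", // hypothetical resource name
		Frequency: 42,                 // expected update frequency, in time units
		StartTime: TimestampProvider.Now(),
		Owner:     signer.Address(), // must match the signer, see Sign() above
	}

	// NewCreateUpdateRequest derives rootAddr/metaHash from the metadata and
	// pre-fills version 1 and the first update period.
	request, err := NewCreateUpdateRequest(metadata)
	if err != nil {
		return nil, err
	}

	request.SetData(payload, false) // plain payload, not a multihash
	if err := request.Sign(signer); err != nil {
		return nil, err
	}

	// MarshalJSON emits the updateRequestJSON wire format (name, frequency,
	// startTime, ownerAddr, rootAddr, metaHash, version, period, data,
	// multiHash, signature).
	return json.Marshal(request)
}

The expectedJSON constant in request_test.go further down shows what such a serialized message looks like on the wire. The patch resumes below.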
+ + r.rootAddr, r.metaHash, _, err = r.metadata.serializeAndHash() + if err != nil { + return err + } + if j.RootAddr != "" && !bytes.Equal(declaredRootAddr, r.rootAddr) { + return NewError(ErrInvalidValue, "rootAddr does not match resource metadata") + } + if j.MetaHash != "" && !bytes.Equal(declaredMetaHash, r.metaHash) { + return NewError(ErrInvalidValue, "metaHash does not match resource metadata") + } + + } else { + //Update message + r.rootAddr = declaredRootAddr + r.metaHash = declaredMetaHash + } + + if j.Signature != "" { + sigBytes, err := hexutil.Decode(j.Signature) + if err != nil || len(sigBytes) != signatureLength { + return NewError(ErrInvalidSignature, "Cannot decode signature") + } + r.signature = new(Signature) + r.updateAddr = r.UpdateAddr() + copy(r.signature[:], sigBytes) + } + return nil +} + +func decodeHexArray(dst []byte, src, name string) error { + bytes, err := decodeHexSlice(src, len(dst), name) + if err != nil { + return err + } + if bytes != nil { + copy(dst, bytes) + } + return nil +} + +func decodeHexSlice(src string, expectedLength int, name string) (bytes []byte, err error) { + if src != "" { + bytes, err = hexutil.Decode(src) + if err != nil || len(bytes) != expectedLength { + return nil, NewErrorf(ErrInvalidValue, "Cannot decode %s", name) + } + } + return bytes, nil +} + +// UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object +// Implements json.Unmarshaler interface +func (r *Request) UnmarshalJSON(rawData []byte) error { + var requestJSON updateRequestJSON + if err := json.Unmarshal(rawData, &requestJSON); err != nil { + return err + } + return r.fromJSON(&requestJSON) +} + +// MarshalJSON takes an update request and encodes it as a JSON structure into a byte array +// Implements json.Marshaler interface +func (r *Request) MarshalJSON() (rawData []byte, err error) { + var signatureString, dataHashString, rootAddrString, metaHashString string + if r.signature != nil { + signatureString = hexutil.Encode(r.signature[:]) + } + if r.data != nil { + dataHashString = hexutil.Encode(r.data) + } + if r.rootAddr != nil { + rootAddrString = hexutil.Encode(r.rootAddr) + } + if r.metaHash != nil { + metaHashString = hexutil.Encode(r.metaHash) + } + var ownerAddrString string + if r.metadata.Frequency == 0 { + ownerAddrString = "" + } else { + ownerAddrString = hexutil.Encode(r.metadata.Owner[:]) + } + + requestJSON := &updateRequestJSON{ + Name: r.metadata.Name, + Frequency: r.metadata.Frequency, + StartTime: r.metadata.StartTime.Time, + Version: r.version, + Period: r.period, + Owner: ownerAddrString, + Data: dataHashString, + Multihash: r.multihash, + Signature: signatureString, + RootAddr: rootAddrString, + MetaHash: metaHashString, + } + + return json.Marshal(requestJSON) +} diff --git a/swarm/storage/mru/request_test.go b/swarm/storage/mru/request_test.go new file mode 100644 index 000000000000..dba55b27e31e --- /dev/null +++ b/swarm/storage/mru/request_test.go @@ -0,0 +1,175 @@ +package mru + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "reflect" + "testing" +) + +func areEqualJSON(s1, s2 string) (bool, error) { + //credit for the trick: turtlemonvh https://gist.github.com/turtlemonvh/e4f7404e28387fadb8ad275a99596f67 + var o1 interface{} + var o2 interface{} + + err := json.Unmarshal([]byte(s1), &o1) + if err != nil { + return false, fmt.Errorf("Error mashalling string 1 :: %s", err.Error()) + } + err = json.Unmarshal([]byte(s2), &o2) + if err != nil { + return false, fmt.Errorf("Error mashalling string 2 
:: %s", err.Error()) + } + + return reflect.DeepEqual(o1, o2), nil +} + +// TestEncodingDecodingUpdateRequests ensures that requests are serialized properly +// while also checking cryptographically that only the owner of a resource can update it. +func TestEncodingDecodingUpdateRequests(t *testing.T) { + + signer := newCharlieSigner() //Charlie, our good guy + falseSigner := newBobSigner() //Bob will play the bad guy again + + // Create a resource to our good guy Charlie's name + createRequest, err := NewCreateRequest(&ResourceMetadata{ + Name: "a good resource name", + Frequency: 300, + StartTime: Timestamp{Time: 1528900000}, + Owner: signer.Address()}) + + if err != nil { + t.Fatalf("Error creating resource name: %s", err) + } + + // We now encode the create message to simulate we send it over the wire + messageRawData, err := createRequest.MarshalJSON() + if err != nil { + t.Fatalf("Error encoding create resource request: %s", err) + } + + // ... the message arrives and is decoded... + var recoveredCreateRequest Request + if err := recoveredCreateRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding create resource request: %s", err) + } + + // ... but verification should fail because it is not signed! + if err := recoveredCreateRequest.Verify(); err == nil { + t.Fatal("Expected Verify to fail since the message is not signed") + } + + // We now assume that the resource was created and propagated. With rootAddr we can retrieve the resource metadata + // and recover the information above. To sign an update, we need the rootAddr and the metaHash to construct + // proof of ownership + + metaHash := createRequest.metaHash + rootAddr := createRequest.rootAddr + const expectedSignature = "0x1c2bab66dc4ed63783d62934e3a628e517888d6949aef0349f3bd677121db9aa09bbfb865904e6c50360e209e0fe6fe757f8a2474cf1b34169c99b95e3fd5a5101" + const expectedJSON = `{"rootAddr":"0x6e744a730f7ea0881528576f0354b6268b98e35a6981ef703153ff1b8d32bbef","metaHash":"0x0c0d5c18b89da503af92302a1a64fab6acb60f78e288eb9c3d541655cd359b60","version":1,"period":7,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421","multiHash":false}` + + //Put together an unsigned update request that we will serialize to send it to the signer. + data := []byte("This hour's update: Swarm 99.0 has been released!") + request := &Request{ + SignedResourceUpdate: SignedResourceUpdate{ + resourceUpdate: resourceUpdate{ + updateHeader: updateHeader{ + UpdateLookup: UpdateLookup{ + period: 7, + version: 1, + rootAddr: rootAddr, + }, + multihash: false, + metaHash: metaHash, + }, + data: data, + }, + }, + } + + messageRawData, err = request.MarshalJSON() + if err != nil { + t.Fatalf("Error encoding update request: %s", err) + } + + equalJSON, err := areEqualJSON(string(messageRawData), expectedJSON) + if err != nil { + t.Fatalf("Error decoding update request JSON: %s", err) + } + if !equalJSON { + t.Fatalf("Received a different JSON message. Expected %s, got %s", expectedJSON, string(messageRawData)) + } + + // now the encoded message messageRawData is sent over the wire and arrives to the signer + + //Attempt to extract an UpdateRequest out of the encoded message + var recoveredRequest Request + if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding update request: %s", err) + } + + //sign the request and see if it matches our predefined signature above. 
+ if err := recoveredRequest.Sign(signer); err != nil { + t.Fatalf("Error signing request: %s", err) + } + + compareByteSliceToExpectedHex(t, "signature", recoveredRequest.signature[:], expectedSignature) + + // mess with the signature and see what happens. To alter the signature, we briefly decode it as JSON + // to alter the signature field. + var j updateRequestJSON + if err := json.Unmarshal([]byte(expectedJSON), &j); err != nil { + t.Fatal("Error unmarshalling test json, check expectedJSON constant") + } + j.Signature = "Certainly not a signature" + corruptMessage, _ := json.Marshal(j) // encode the message with the bad signature + var corruptRequest Request + if err = corruptRequest.UnmarshalJSON(corruptMessage); err == nil { + t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature") + } + + // Now imagine Evil Bob (why always Bob, poor Bob) attempts to update Charlie's resource, + // signing a message with his private key + if err := request.Sign(falseSigner); err != nil { + t.Fatalf("Error signing: %s", err) + } + + // Now Bob encodes the message to send it over the wire... + messageRawData, err = request.MarshalJSON() + if err != nil { + t.Fatalf("Error encoding message:%s", err) + } + + // ... the message arrives to our Swarm node and it is decoded. + recoveredRequest = Request{} + if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding message:%s", err) + } + + // Before discovering Bob's misdemeanor, let's see what would happen if we mess + // with the signature big time to see if Verify catches it + savedSignature := *recoveredRequest.signature // save the signature for later + binary.LittleEndian.PutUint64(recoveredRequest.signature[5:], 556845463424) // write some random data to break the signature + if err = recoveredRequest.Verify(); err == nil { + t.Fatal("Expected Verify to fail on corrupt signature") + } + + // restore the Evil Bob's signature from corruption + *recoveredRequest.signature = savedSignature + + // Now the signature is not corrupt, however Verify should now fail because Bob doesn't own the resource + if err = recoveredRequest.Verify(); err == nil { + t.Fatalf("Expected Verify to fail because this resource belongs to Charlie, not Bob the attacker:%s", err) + } + + // Sign with our friend Charlie's private key + if err := recoveredRequest.Sign(signer); err != nil { + t.Fatalf("Error signing with the correct private key: %s", err) + } + + // And now, Verify should work since this resource belongs to Charlie + if err = recoveredRequest.Verify(); err != nil { + t.Fatalf("Error verifying that Charlie, the good guy, can sign his resource:%s", err) + } +} diff --git a/swarm/storage/mru/resource.go b/swarm/storage/mru/resource.go index 1e92a5e929db..aa83ff62a9a6 100644 --- a/swarm/storage/mru/resource.go +++ b/swarm/storage/mru/resource.go @@ -19,110 +19,29 @@ package mru import ( "bytes" "context" - "encoding/binary" - "errors" - "fmt" - "math/big" - "path/filepath" - "sync" "time" - "golang.org/x/net/idna" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/contracts/ens" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/storage" ) const ( - signatureLength = 65 - metadataChunkOffsetSize = 18 - DbDirName = 
"resource" - chunkSize = 4096 // temporary until we implement FileStore in the resourcehandler - defaultStoreTimeout = 4000 * time.Millisecond - hasherCount = 8 - resourceHash = storage.SHA3Hash - defaultRetrieveTimeout = 100 * time.Millisecond + defaultStoreTimeout = 4000 * time.Millisecond + hasherCount = 8 + resourceHashAlgorithm = storage.SHA3Hash + defaultRetrieveTimeout = 100 * time.Millisecond ) -type blockEstimator struct { - Start time.Time - Average time.Duration -} - -// TODO: Average must be adjusted when blockchain connection is present and synced -func NewBlockEstimator() *blockEstimator { - sampleDate, _ := time.Parse(time.RFC3339, "2018-05-04T20:35:22Z") // from etherscan.io - sampleBlock := int64(3169691) // from etherscan.io - ropstenStart, _ := time.Parse(time.RFC3339, "2016-11-20T11:48:50Z") // from etherscan.io - ns := sampleDate.Sub(ropstenStart).Nanoseconds() - period := int(ns / sampleBlock) - parsestring := fmt.Sprintf("%dns", int(float64(period)*1.0005)) // increase the blockcount a little, so we don't overshoot the read block height; if we do, we will never find the updates when getting synced data - periodNs, _ := time.ParseDuration(parsestring) - return &blockEstimator{ - Start: ropstenStart, - Average: periodNs, - } -} - -func (b *blockEstimator) HeaderByNumber(context.Context, string, *big.Int) (*types.Header, error) { - return &types.Header{ - Number: big.NewInt(time.Since(b.Start).Nanoseconds() / b.Average.Nanoseconds()), - }, nil -} - -type Error struct { - code int - err string -} - -func (e *Error) Error() string { - return e.err -} - -func (e *Error) Code() int { - return e.code -} - -func NewError(code int, s string) error { - if code < 0 || code >= ErrCnt { - panic("no such error code!") - } - r := &Error{ - err: s, - } - switch code { - case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData: - r.code = code - } - return r -} - -type Signature [signatureLength]byte - -type LookupParams struct { - Limit bool - Max uint32 -} - -// Encapsulates an specific resource update. When synced it contains the most recent -// version of the resource update data. +// resource caches resource data and the metadata of its root chunk. 
type resource struct { + resourceUpdate + ResourceMetadata *bytes.Reader - Multihash bool - name string - nameHash common.Hash - startBlock uint64 - lastPeriod uint32 - lastKey storage.Address - frequency uint64 - version uint32 - data []byte - updated time.Time + lastKey storage.Address + updated time.Time +} + +func (r *resource) Context() context.Context { + return context.TODO() } // TODO Expire content after a defined period (to force resync) @@ -130,937 +49,28 @@ func (r *resource) isSynced() bool { return !r.updated.IsZero() } -func (r *resource) NameHash() common.Hash { - return r.nameHash -} - -func (r *resource) Size(chan bool) (int64, error) { +// implements storage.LazySectionReader +func (r *resource) Size(ctx context.Context, _ chan bool) (int64, error) { if !r.isSynced() { return 0, NewError(ErrNotSynced, "Not synced") } - return int64(len(r.data)), nil + return int64(len(r.resourceUpdate.data)), nil } +//returns the resource's human-readable name func (r *resource) Name() string { - return r.name -} - -func (r *resource) UnmarshalBinary(data []byte) error { - r.startBlock = binary.LittleEndian.Uint64(data[:8]) - r.frequency = binary.LittleEndian.Uint64(data[8:16]) - r.name = string(data[16:]) - return nil -} - -func (r *resource) MarshalBinary() ([]byte, error) { - b := make([]byte, 16+len(r.name)) - binary.LittleEndian.PutUint64(b, r.startBlock) - binary.LittleEndian.PutUint64(b[8:], r.frequency) - copy(b[16:], []byte(r.name)) - return b, nil -} - -type headerGetter interface { - HeaderByNumber(context.Context, string, *big.Int) (*types.Header, error) -} - -type ownerValidator interface { - ValidateOwner(name string, address common.Address) (bool, error) + return r.ResourceMetadata.Name } -// Mutable resource is an entity which allows updates to a resource -// without resorting to ENS on each update. -// The update scheme is built on swarm chunks with chunk keys following -// a predictable, versionable pattern. -// -// Updates are defined to be periodic in nature, where periods are -// expressed in terms of number of blocks. -// -// The root entry of a mutable resource is tied to a unique identifier, -// typically - but not necessarily - an ens name. The identifier must be -// an valid IDNA string. It also contains the block number -// when the resource update was first registered, and -// the block frequency with which the resource will be updated, both of -// which are stored as little-endian uint64 values in the database (for a -// total of 16 bytes). It also contains the unique identifier. -// It is stored in a separate content-addressed chunk (call it the metadata chunk), -// with the following layout: -// -// (0x0000|startblock|frequency|identifier) -// -// (The two first zero-value bytes are used for disambiguation by the chunk validator, -// and update chunk will always have a value > 0 there.) -// -// The root entry tells the requester from when the mutable resource was -// first added (block number) and in which block number to look for the -// actual updates. Thus, a resource update for identifier "føø.bar" -// starting at block 4200 with frequency 42 will have updates on block 4242, -// 4284, 4326 and so on. -// -// Actual data updates are also made in the form of swarm chunks. 
The keys -// of the updates are the hash of a concatenation of properties as follows: -// -// sha256(period|version|namehash) -// -// The period is (currentblock - startblock) / frequency -// -// Using our previous example, this means that a period 3 will have 4326 as -// the block number. -// -// If more than one update is made to the same block number, incremental -// version numbers are used successively. -// -// A lookup agent need only know the identifier name in order to get the versions -// -// the resourcedata is: -// headerlength|period|version|identifier|data -// -// if a validator is active, the chunk data is: -// resourcedata|sign(resourcedata) -// otherwise, the chunk data is the same as the resourcedata -// -// headerlength is a 16 bit value containing the byte length of period|version|name -// -// TODO: Include modtime in chunk data + signature -type Handler struct { - chunkStore *storage.NetStore - HashSize int - signer Signer - headerGetter headerGetter - ownerValidator ownerValidator - resources map[string]*resource - hashPool sync.Pool - resourceLock sync.RWMutex - storeTimeout time.Duration - queryMaxPeriods *LookupParams -} - -type HandlerParams struct { - QueryMaxPeriods *LookupParams - Signer Signer - HeaderGetter headerGetter - OwnerValidator ownerValidator -} - -// Create or open resource update chunk store -func NewHandler(params *HandlerParams) (*Handler, error) { - if params.QueryMaxPeriods == nil { - params.QueryMaxPeriods = &LookupParams{ - Limit: false, - } - } - rh := &Handler{ - headerGetter: params.HeaderGetter, - ownerValidator: params.OwnerValidator, - resources: make(map[string]*resource), - storeTimeout: defaultStoreTimeout, - signer: params.Signer, - hashPool: sync.Pool{ - New: func() interface{} { - return storage.MakeHashFunc(resourceHash)() - }, - }, - queryMaxPeriods: params.QueryMaxPeriods, - } - - for i := 0; i < hasherCount; i++ { - hashfunc := storage.MakeHashFunc(resourceHash)() - if rh.HashSize == 0 { - rh.HashSize = hashfunc.Size() - } - rh.hashPool.Put(hashfunc) - } - - return rh, nil -} - -// SetStore sets the store backend for resource updates -func (h *Handler) SetStore(store *storage.NetStore) { - h.chunkStore = store -} - -// Validate is a chunk validation method (matches ChunkValidatorFunc signature) -// -// If resource update, owner is checked against ENS record of resource name inferred from chunk data -// If parsed signature is nil, validates automatically -// If not resource update, it validates are root chunk if length is metadataChunkOffsetSize and first two bytes are 0 -func (h *Handler) Validate(addr storage.Address, data []byte) bool { - signature, period, version, name, parseddata, _, err := h.parseUpdate(data) - if err != nil { - log.Warn(err.Error()) - if len(data) > metadataChunkOffsetSize { // identifier comes after this byte range, and must be at least one byte - if bytes.Equal(data[:2], []byte{0, 0}) { - return true - } - } - log.Error("Invalid resource chunk") - return false - } else if signature == nil { - return bytes.Equal(h.resourceHash(period, version, ens.EnsNode(name)), addr) - } - - digest := h.keyDataHash(addr, parseddata) - addrSig, err := getAddressFromDataSig(digest, *signature) - if err != nil { - log.Error("Invalid signature on resource chunk") - return false - } - ok, _ := h.checkAccess(name, addrSig) - return ok -} - -// If no ens client is supplied, resource updates are not validated -func (h *Handler) IsValidated() bool { - return h.ownerValidator != nil -} - -// Create the resource update digest 
used in signatures -func (h *Handler) keyDataHash(addr storage.Address, data []byte) common.Hash { - hasher := h.hashPool.Get().(storage.SwarmHash) - defer h.hashPool.Put(hasher) - hasher.Reset() - hasher.Write(addr[:]) - hasher.Write(data) - return common.BytesToHash(hasher.Sum(nil)) -} - -// Checks if current address matches owner address of ENS -func (h *Handler) checkAccess(name string, address common.Address) (bool, error) { - if h.ownerValidator == nil { - return true, nil - } - return h.ownerValidator.ValidateOwner(name, address) -} - -// get data from current resource -func (h *Handler) GetContent(name string) (storage.Address, []byte, error) { - rsrc := h.get(name) - if rsrc == nil || !rsrc.isSynced() { - return nil, nil, NewError(ErrNotFound, " does not exist or is not synced") - } - return rsrc.lastKey, rsrc.data, nil -} - -// Gets the period of the current data loaded in the resource -func (h *Handler) GetLastPeriod(nameHash string) (uint32, error) { - rsrc := h.get(nameHash) - if rsrc == nil { - return 0, NewError(ErrNotFound, " does not exist") - } else if !rsrc.isSynced() { - return 0, NewError(ErrNotSynced, " is not synced") - } - return rsrc.lastPeriod, nil -} - -// Gets the version of the current data loaded in the resource -func (h *Handler) GetVersion(nameHash string) (uint32, error) { - rsrc := h.get(nameHash) - if rsrc == nil { - return 0, NewError(ErrNotFound, " does not exist") - } else if !rsrc.isSynced() { - return 0, NewError(ErrNotSynced, " is not synced") - } - return rsrc.version, nil -} - -// \TODO should be hashsize * branches from the chosen chunker, implement with FileStore -func (h *Handler) chunkSize() int64 { - return chunkSize -} - -// Creates a new root entry for a mutable resource identified by `name` with the specified `frequency`. -// -// The signature data should match the hash of the idna-converted name by the validator's namehash function, NOT the raw name bytes. -// -// The start block of the resource update will be the actual current block height of the connected network. 
-func (h *Handler) New(ctx context.Context, name string, frequency uint64) (storage.Address, *resource, error) { - - // frequency 0 is invalid - if frequency == 0 { - return nil, nil, NewError(ErrInvalidValue, "Frequency cannot be 0") - } - - // make sure name only contains ascii values - if !isSafeName(name) { - return nil, nil, NewError(ErrInvalidValue, fmt.Sprintf("Invalid name: '%s'", name)) - } - - nameHash := ens.EnsNode(name) - - // if the signer function is set, validate that the key of the signer has access to modify this ENS name - if h.signer != nil { - signature, err := h.signer.Sign(nameHash) - if err != nil { - return nil, nil, NewError(ErrInvalidSignature, fmt.Sprintf("Sign fail: %v", err)) - } - addr, err := getAddressFromDataSig(nameHash, signature) - if err != nil { - return nil, nil, NewError(ErrInvalidSignature, fmt.Sprintf("Retrieve address from signature fail: %v", err)) - } - ok, err := h.checkAccess(name, addr) - if err != nil { - return nil, nil, err - } else if !ok { - return nil, nil, NewError(ErrUnauthorized, fmt.Sprintf("Not owner of '%s'", name)) - } - } - - // get our blockheight at this time - currentblock, err := h.getBlock(ctx, name) - if err != nil { - return nil, nil, err - } - - chunk := h.newMetaChunk(name, currentblock, frequency) - - h.chunkStore.Put(chunk) - log.Debug("new resource", "name", name, "key", nameHash, "startBlock", currentblock, "frequency", frequency) - - // create the internal index for the resource and populate it with the data of the first version - rsrc := &resource{ - startBlock: currentblock, - frequency: frequency, - name: name, - nameHash: nameHash, - updated: time.Now(), - } - h.set(nameHash.Hex(), rsrc) - - return chunk.Addr, rsrc, nil -} - -func (h *Handler) newMetaChunk(name string, startBlock uint64, frequency uint64) *storage.Chunk { - // the metadata chunk points to data of first blockheight + update frequency - // from this we know from what blockheight we should look for updates, and how often - // it also contains the name of the resource, so we know what resource we are working with - data := make([]byte, metadataChunkOffsetSize+len(name)) - - // root block has first two bytes both set to 0, which distinguishes from update bytes - val := make([]byte, 8) - binary.LittleEndian.PutUint64(val, startBlock) - copy(data[2:10], val) - binary.LittleEndian.PutUint64(val, frequency) - copy(data[10:18], val) - copy(data[18:], []byte(name)) - - // the key of the metadata chunk is content-addressed - // if it wasn't we couldn't replace it later - // resolving this relationship is left up to external agents (for example ENS) - hasher := h.hashPool.Get().(storage.SwarmHash) - hasher.Reset() - hasher.Write(data) - key := hasher.Sum(nil) - h.hashPool.Put(hasher) - - // make the chunk and send it to swarm - chunk := storage.NewChunk(key, nil) - chunk.SData = make([]byte, metadataChunkOffsetSize+len(name)) - copy(chunk.SData, data) - return chunk -} - -// Searches and retrieves the specific version of the resource update identified by `name` -// at the specific block height -// -// If refresh is set to true, the resource data will be reloaded from the resource update -// metadata chunk. 
-// It is the callers responsibility to make sure that this chunk exists (if the resource -// update root data was retrieved externally, it typically doesn't) -func (h *Handler) LookupVersionByName(ctx context.Context, name string, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) { - return h.LookupVersion(ctx, ens.EnsNode(name), period, version, refresh, maxLookup) -} - -func (h *Handler) LookupVersion(ctx context.Context, nameHash common.Hash, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) { - rsrc := h.get(nameHash.Hex()) - if rsrc == nil { - return nil, NewError(ErrNothingToReturn, "resource not loaded") - } - return h.lookup(rsrc, period, version, refresh, maxLookup) -} - -// Retrieves the latest version of the resource update identified by `name` -// at the specified block height -// -// If an update is found, version numbers are iterated until failure, and the last -// successfully retrieved version is copied to the corresponding resources map entry -// and returned. -// -// See also (*Handler).LookupVersion -func (h *Handler) LookupHistoricalByName(ctx context.Context, name string, period uint32, refresh bool, maxLookup *LookupParams) (*resource, error) { - return h.LookupHistorical(ctx, ens.EnsNode(name), period, refresh, maxLookup) -} - -func (h *Handler) LookupHistorical(ctx context.Context, nameHash common.Hash, period uint32, refresh bool, maxLookup *LookupParams) (*resource, error) { - rsrc := h.get(nameHash.Hex()) - if rsrc == nil { - return nil, NewError(ErrNothingToReturn, "resource not loaded") - } - return h.lookup(rsrc, period, 0, refresh, maxLookup) -} - -// Retrieves the latest version of the resource update identified by `name` -// at the next update block height -// -// It starts at the next period after the current block height, and upon failure -// tries the corresponding keys of each previous period until one is found -// (or startBlock is reached, in which case there are no updates). -// -// Version iteration is done as in (*Handler).LookupHistorical -// -// See also (*Handler).LookupHistorical -func (h *Handler) LookupLatestByName(ctx context.Context, name string, refresh bool, maxLookup *LookupParams) (*resource, error) { - return h.LookupLatest(ctx, ens.EnsNode(name), refresh, maxLookup) -} - -func (h *Handler) LookupLatest(ctx context.Context, nameHash common.Hash, refresh bool, maxLookup *LookupParams) (*resource, error) { - - // get our blockheight at this time and the next block of the update period - rsrc := h.get(nameHash.Hex()) - if rsrc == nil { - return nil, NewError(ErrNothingToReturn, "resource not loaded") - } - currentblock, err := h.getBlock(ctx, rsrc.name) - if err != nil { - return nil, err - } - nextperiod, err := getNextPeriod(rsrc.startBlock, currentblock, rsrc.frequency) - if err != nil { - return nil, err - } - return h.lookup(rsrc, nextperiod, 0, refresh, maxLookup) -} - -// Returns the resource before the one currently loaded in the resource index -// -// This is useful where resource updates are used incrementally in contrast to -// merely replacing content. 
-// -// Requires a synced resource object -func (h *Handler) LookupPreviousByName(ctx context.Context, name string, maxLookup *LookupParams) (*resource, error) { - return h.LookupPrevious(ctx, ens.EnsNode(name), maxLookup) -} - -func (h *Handler) LookupPrevious(ctx context.Context, nameHash common.Hash, maxLookup *LookupParams) (*resource, error) { - rsrc := h.get(nameHash.Hex()) - if rsrc == nil { - return nil, NewError(ErrNothingToReturn, "resource not loaded") - } - if !rsrc.isSynced() { - return nil, NewError(ErrNotSynced, "LookupPrevious requires synced resource.") - } else if rsrc.lastPeriod == 0 { - return nil, NewError(ErrNothingToReturn, " not found") - } - if rsrc.version > 1 { - rsrc.version-- - } else if rsrc.lastPeriod == 1 { - return nil, NewError(ErrNothingToReturn, "Current update is the oldest") - } else { - rsrc.version = 0 - rsrc.lastPeriod-- - } - return h.lookup(rsrc, rsrc.lastPeriod, rsrc.version, false, maxLookup) -} - -// base code for public lookup methods -func (h *Handler) lookup(rsrc *resource, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) { - - // we can't look for anything without a store - if h.chunkStore == nil { - return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups") - } - - // period 0 does not exist - if period == 0 { - return nil, NewError(ErrInvalidValue, "period must be >0") - } - - // start from the last possible block period, and iterate previous ones until we find a match - // if we hit startBlock we're out of options - var specificversion bool - if version > 0 { - specificversion = true - } else { - version = 1 - } - - var hops uint32 - if maxLookup == nil { - maxLookup = h.queryMaxPeriods - } - log.Trace("resource lookup", "period", period, "version", version, "limit", maxLookup.Limit, "max", maxLookup.Max) - for period > 0 { - if maxLookup.Limit && hops > maxLookup.Max { - return nil, NewError(ErrPeriodDepth, fmt.Sprintf("Lookup exceeded max period hops (%d)", maxLookup.Max)) - } - key := h.resourceHash(period, version, rsrc.nameHash) - chunk, err := h.chunkStore.GetWithTimeout(key, defaultRetrieveTimeout) - if err == nil { - if specificversion { - return h.updateIndex(rsrc, chunk) - } - // check if we have versions > 1. If a version fails, the previous version is used and returned. - log.Trace("rsrc update version 1 found, checking for version updates", "period", period, "key", key) - for { - newversion := version + 1 - key := h.resourceHash(period, newversion, rsrc.nameHash) - newchunk, err := h.chunkStore.GetWithTimeout(key, defaultRetrieveTimeout) - if err != nil { - return h.updateIndex(rsrc, chunk) - } - chunk = newchunk - version = newversion - log.Trace("version update found, checking next", "version", version, "period", period, "key", key) - } - } - log.Trace("rsrc update not found, checking previous period", "period", period, "key", key) - period-- - hops++ - } - return nil, NewError(ErrNotFound, "no updates found") -} - -// Retrieves a resource metadata chunk and creates/updates the index entry for it -// with the resulting metadata -func (h *Handler) Load(addr storage.Address) (*resource, error) { - chunk, err := h.chunkStore.GetWithTimeout(addr, defaultRetrieveTimeout) - if err != nil { - return nil, NewError(ErrNotFound, err.Error()) - } - - // minimum sanity check for chunk data (an update chunk first two bytes is headerlength uint16, and cannot be 0) - // \TODO this is not enough to make sure the data isn't bogus. 
A normal content addressed chunk could still satisfy these criteria - if !bytes.Equal(chunk.SData[:2], []byte{0x0, 0x0}) { - return nil, NewError(ErrCorruptData, fmt.Sprintf("Chunk is not a resource metadata chunk")) - } else if len(chunk.SData) <= metadataChunkOffsetSize { - return nil, NewError(ErrNothingToReturn, fmt.Sprintf("Invalid chunk length %d, should be minimum %d", len(chunk.SData), metadataChunkOffsetSize+1)) - } - - // create the index entry - rsrc := &resource{} - rsrc.UnmarshalBinary(chunk.SData[2:]) - rsrc.nameHash = ens.EnsNode(rsrc.name) - h.set(rsrc.nameHash.Hex(), rsrc) - log.Trace("resource index load", "rootkey", addr, "name", rsrc.name, "namehash", rsrc.nameHash, "startblock", rsrc.startBlock, "frequency", rsrc.frequency) - return rsrc, nil -} - -// update mutable resource index map with specified content -func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, error) { - - // retrieve metadata from chunk data and check that it matches this mutable resource - signature, period, version, name, data, multihash, err := h.parseUpdate(chunk.SData) - if rsrc.name != name { - return nil, NewError(ErrNothingToReturn, fmt.Sprintf("Update belongs to '%s', but have '%s'", name, rsrc.name)) - } - log.Trace("resource index update", "name", rsrc.name, "namehash", rsrc.nameHash, "updatekey", chunk.Addr, "period", period, "version", version) - - // check signature (if signer algorithm is present) - // \TODO maybe this check is redundant if also checked upon retrieval of chunk - if signature != nil { - digest := h.keyDataHash(chunk.Addr, data) - _, err = getAddressFromDataSig(digest, *signature) - if err != nil { - return nil, NewError(ErrUnauthorized, fmt.Sprintf("Invalid signature: %v", err)) - } - } - - // update our rsrcs entry map - rsrc.lastKey = chunk.Addr - rsrc.lastPeriod = period - rsrc.version = version - rsrc.updated = time.Now() - rsrc.data = make([]byte, len(data)) - rsrc.Multihash = multihash - rsrc.Reader = bytes.NewReader(rsrc.data) - copy(rsrc.data, data) - log.Debug(" synced", "name", rsrc.name, "key", chunk.Addr, "period", rsrc.lastPeriod, "version", rsrc.version) - h.set(rsrc.nameHash.Hex(), rsrc) - return rsrc, nil -} - -// retrieve update metadata from chunk data -// mirrors newUpdateChunk() -func (h *Handler) parseUpdate(chunkdata []byte) (*Signature, uint32, uint32, string, []byte, bool, error) { - // absolute minimum an update chunk can contain: - // 14 = header + one byte of name + one byte of data - if len(chunkdata) < 14 { - return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, "chunk less than 13 bytes cannot be a resource update chunk") - } - cursor := 0 - headerlength := binary.LittleEndian.Uint16(chunkdata[cursor : cursor+2]) - cursor += 2 - datalength := binary.LittleEndian.Uint16(chunkdata[cursor : cursor+2]) - cursor += 2 - var exclsignlength int - // we need extra magic if it's a multihash, since we used datalength 0 in header as an indicator of multihash content - // retrieve the second varint and set this as the data length - // TODO: merge with isMultihash code - if datalength == 0 { - uvarintbuf := bytes.NewBuffer(chunkdata[headerlength+4:]) - r, err := binary.ReadUvarint(uvarintbuf) - if err != nil { - errstr := fmt.Sprintf("corrupt multihash, hash id varint could not be read: %v", err) - log.Warn(errstr) - return nil, 0, 0, "", nil, false, NewError(ErrCorruptData, errstr) - - } - r, err = binary.ReadUvarint(uvarintbuf) - if err != nil { - errstr := fmt.Sprintf("corrupt multihash, hash length field could not 
be read: %v", err) - log.Warn(errstr) - return nil, 0, 0, "", nil, false, NewError(ErrCorruptData, errstr) - - } - exclsignlength = int(headerlength + uint16(r)) - } else { - exclsignlength = int(headerlength + datalength + 4) - } - - // the total length excluding signature is headerlength and datalength fields plus the length of the header and the data given in these fields - exclsignlength = int(headerlength + datalength + 4) - if exclsignlength > len(chunkdata) || exclsignlength < 14 { - return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("Reported headerlength %d + datalength %d longer than actual chunk data length %d", headerlength, exclsignlength, len(chunkdata))) - } else if exclsignlength < 14 { - return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("Reported headerlength %d + datalength %d is smaller than minimum valid resource chunk length %d", headerlength, datalength, 14)) - } - - // at this point we can be satisfied that the data integrity is ok - var period uint32 - var version uint32 - var name string - var data []byte - period = binary.LittleEndian.Uint32(chunkdata[cursor : cursor+4]) - cursor += 4 - version = binary.LittleEndian.Uint32(chunkdata[cursor : cursor+4]) - cursor += 4 - namelength := int(headerlength) - cursor + 4 - if l := len(chunkdata); l < cursor+namelength { - return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("chunk less than %v bytes is too short to read the name", l)) - } - name = string(chunkdata[cursor : cursor+namelength]) - cursor += namelength - - // if multihash content is indicated we check the validity of the multihash - // \TODO the check above for multihash probably is sufficient also for this case (or can be with a small adjustment) and if so this code should be removed - var intdatalength int - var ismultihash bool - if datalength == 0 { - var intheaderlength int - var err error - intdatalength, intheaderlength, err = multihash.GetMultihashLength(chunkdata[cursor:]) - if err != nil { - log.Error("multihash parse error", "err", err) - return nil, 0, 0, "", nil, false, err - } - intdatalength += intheaderlength - multihashboundary := cursor + intdatalength - if len(chunkdata) != multihashboundary && len(chunkdata) < multihashboundary+signatureLength { - log.Debug("multihash error", "chunkdatalen", len(chunkdata), "multihashboundary", multihashboundary) - return nil, 0, 0, "", nil, false, errors.New("Corrupt multihash data") - } - ismultihash = true - } else { - intdatalength = int(datalength) - } - data = make([]byte, intdatalength) - copy(data, chunkdata[cursor:cursor+intdatalength]) - - // omit signatures if we have no validator - var signature *Signature - cursor += intdatalength - if h.signer != nil { - sigdata := chunkdata[cursor : cursor+signatureLength] - if len(sigdata) > 0 { - signature = &Signature{} - copy(signature[:], sigdata) - } - } - - return signature, period, version, name, data, ismultihash, nil -} - -// Adds an actual data update -// -// Uses the data currently loaded in the resources map entry. -// It is the caller's responsibility to make sure that this data is not stale. 
-// -// A resource update cannot span chunks, and thus has max length 4096 -func (h *Handler) UpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, error) { - // \TODO perhaps this check should be in newUpdateChunk() - if _, _, err := multihash.GetMultihashLength(data); err != nil { - return nil, NewError(ErrNothingToReturn, err.Error()) - } - return h.update(ctx, name, data, true) -} - -func (h *Handler) Update(ctx context.Context, name string, data []byte) (storage.Address, error) { - return h.update(ctx, name, data, false) -} - -// create and commit an update -func (h *Handler) update(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, error) { - - // zero-length updates are bogus - if len(data) == 0 { - return nil, NewError(ErrInvalidValue, "I refuse to waste swarm space for updates with empty values, amigo (data length is 0)") - } - - // we can't update anything without a store - if h.chunkStore == nil { - return nil, NewError(ErrInit, "Call Handler.SetStore() before updating") - } - - // signature length is 0 if we are not using them - var signaturelength int - if h.signer != nil { - signaturelength = signatureLength - } - - // get the cached information - nameHash := ens.EnsNode(name) - nameHashHex := nameHash.Hex() - rsrc := h.get(nameHashHex) - if rsrc == nil { - return nil, NewError(ErrNotFound, fmt.Sprintf(" object '%s' not in index", name)) - } else if !rsrc.isSynced() { - return nil, NewError(ErrNotSynced, " object not in sync") - } - - // an update can be only one chunk long; data length less header and signature data - // 12 = length of header and data length fields (2xuint16) plus period and frequency value fields (2xuint32) - datalimit := h.chunkSize() - int64(signaturelength-len(name)-12) - if int64(len(data)) > datalimit { - return nil, NewError(ErrDataOverflow, fmt.Sprintf("Data overflow: %d / %d bytes", len(data), datalimit)) - } - - // get our blockheight at this time and the next block of the update period - currentblock, err := h.getBlock(ctx, name) - if err != nil { - return nil, NewError(ErrIO, fmt.Sprintf("Could not get block height: %v", err)) - } - nextperiod, err := getNextPeriod(rsrc.startBlock, currentblock, rsrc.frequency) - if err != nil { - return nil, err - } - - // if we already have an update for this block then increment version - // resource object MUST be in sync for version to be correct, but we checked this earlier in the method already - var version uint32 - if h.hasUpdate(nameHashHex, nextperiod) { - version = rsrc.version - } - version++ - - // calculate the chunk key - key := h.resourceHash(nextperiod, version, rsrc.nameHash) - - // if we have a signing function, sign the update - // \TODO this code should probably be consolidated with corresponding code in New() - var signature *Signature - if h.signer != nil { - // sign the data hash with the key - digest := h.keyDataHash(key, data) - sig, err := h.signer.Sign(digest) - if err != nil { - return nil, NewError(ErrInvalidSignature, fmt.Sprintf("Sign fail: %v", err)) - } - signature = &sig - - // get the address of the signer (which also checks that it's a valid signature) - addr, err := getAddressFromDataSig(digest, *signature) - if err != nil { - return nil, NewError(ErrInvalidSignature, fmt.Sprintf("Invalid data/signature: %v", err)) - } - if h.signer != nil { - // check if the signer has access to update - ok, err := h.checkAccess(name, addr) - if err != nil { - return nil, NewError(ErrIO, fmt.Sprintf("Access check fail: %v", err)) - } 
else if !ok { - return nil, NewError(ErrUnauthorized, fmt.Sprintf("Address %x does not have access to update %s", addr, name)) - } - } - } - - // a datalength field set to 0 means the content is a multihash - var datalength int - if !multihash { - datalength = len(data) - } - chunk := newUpdateChunk(key, signature, nextperiod, version, name, data, datalength) - - // send the chunk - h.chunkStore.Put(chunk) - log.Trace("resource update", "name", name, "key", key, "currentblock", currentblock, "lastperiod", nextperiod, "version", version, "data", chunk.SData, "multihash", multihash) - - // update our resources map entry and return the new key - rsrc.lastPeriod = nextperiod - rsrc.version = version - rsrc.data = make([]byte, len(data)) - copy(rsrc.data, data) - return key, nil -} - -// Closes the datastore. -// Always call this at shutdown to avoid data corruption. -func (h *Handler) Close() { - h.chunkStore.Close() -} - -// gets the current block height -func (h *Handler) getBlock(ctx context.Context, name string) (uint64, error) { - blockheader, err := h.headerGetter.HeaderByNumber(ctx, name, nil) - if err != nil { - return 0, err - } - return blockheader.Number.Uint64(), nil -} - -// Calculate the period index (aka major version number) from a given block number -func (h *Handler) BlockToPeriod(name string, blocknumber uint64) (uint32, error) { - return getNextPeriod(h.resources[name].startBlock, blocknumber, h.resources[name].frequency) -} - -// Calculate the block number from a given period index (aka major version number) -func (h *Handler) PeriodToBlock(name string, period uint32) uint64 { - return h.resources[name].startBlock + (uint64(period) * h.resources[name].frequency) -} - -// Retrieves the resource index value for the given nameHash -func (h *Handler) get(nameHash string) *resource { - h.resourceLock.RLock() - defer h.resourceLock.RUnlock() - rsrc := h.resources[nameHash] - return rsrc -} - -// Sets the resource index value for the given nameHash -func (h *Handler) set(nameHash string, rsrc *resource) { - h.resourceLock.Lock() - defer h.resourceLock.Unlock() - h.resources[nameHash] = rsrc -} - -// used for chunk keys -func (h *Handler) resourceHash(period uint32, version uint32, namehash common.Hash) storage.Address { - // format is: hash(period|version|namehash) - hasher := h.hashPool.Get().(storage.SwarmHash) - defer h.hashPool.Put(hasher) - hasher.Reset() - b := make([]byte, 4) - binary.LittleEndian.PutUint32(b, period) - hasher.Write(b) - binary.LittleEndian.PutUint32(b, version) - hasher.Write(b) - hasher.Write(namehash[:]) - return hasher.Sum(nil) -} - -// Checks if we already have an update on this resource, according to the value in the current state of the resource index -func (h *Handler) hasUpdate(nameHash string, period uint32) bool { - return h.resources[nameHash].lastPeriod == period -} - -func getAddressFromDataSig(datahash common.Hash, signature Signature) (common.Address, error) { - pub, err := crypto.SigToPub(datahash.Bytes(), signature[:]) - if err != nil { - return common.Address{}, err - } - return crypto.PubkeyToAddress(*pub), nil -} - -// create an update chunk -func newUpdateChunk(addr storage.Address, signature *Signature, period uint32, version uint32, name string, data []byte, datalength int) *storage.Chunk { - - // no signatures if no validator - var signaturelength int - if signature != nil { - signaturelength = signatureLength - } - - // prepend version and period to allow reverse lookups - headerlength := len(name) + 4 + 4 - - actualdatalength := 
len(data) - chunk := storage.NewChunk(addr, nil) - chunk.SData = make([]byte, 4+signaturelength+headerlength+actualdatalength) // initial 4 are uint16 length descriptors for headerlength and datalength - - // data header length does NOT include the header length prefix bytes themselves - cursor := 0 - binary.LittleEndian.PutUint16(chunk.SData[cursor:], uint16(headerlength)) - cursor += 2 - - // data length - binary.LittleEndian.PutUint16(chunk.SData[cursor:], uint16(datalength)) - cursor += 2 - - // header = period + version + name - binary.LittleEndian.PutUint32(chunk.SData[cursor:], period) - cursor += 4 - - binary.LittleEndian.PutUint32(chunk.SData[cursor:], version) - cursor += 4 - - namebytes := []byte(name) - copy(chunk.SData[cursor:], namebytes) - cursor += len(namebytes) - - // add the data - copy(chunk.SData[cursor:], data) - - // if signature is present it's the last item in the chunk data - if signature != nil { - cursor += actualdatalength - copy(chunk.SData[cursor:], signature[:]) - } - - chunk.Size = int64(len(chunk.SData)) - return chunk -} - -// Helper function to calculate the next update period number from the current block, start block and frequency +// Helper function to calculate the next update period number from the current time, start time and frequency func getNextPeriod(start uint64, current uint64, frequency uint64) (uint32, error) { if current < start { - return 0, NewError(ErrInvalidValue, fmt.Sprintf("given current block value %d < start block %d", current, start)) - } - blockdiff := current - start - period := blockdiff / frequency - return uint32(period + 1), nil -} - -// ToSafeName is a helper function to create an valid idna of a given resource update name -func ToSafeName(name string) (string, error) { - return idna.ToASCII(name) -} - -// check that name identifiers contain valid bytes -// Strings created using ToSafeName() should satisfy this check -func isSafeName(name string) bool { - if name == "" { - return false - } - validname, err := idna.ToASCII(name) - if err != nil { - return false - } - return validname == name -} - -func NewTestHandler(datadir string, params *HandlerParams) (*Handler, error) { - path := filepath.Join(datadir, DbDirName) - rh, err := NewHandler(params) - if err != nil { - return nil, fmt.Errorf("resource handler create fail: %v", err) + return 0, NewErrorf(ErrInvalidValue, "given current time value %d < start time %d", current, start) } - localstoreparams := storage.NewDefaultLocalStoreParams() - localstoreparams.Init(path) - localStore, err := storage.NewLocalStore(localstoreparams, nil) - if err != nil { - return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err) + if frequency == 0 { + return 0, NewError(ErrInvalidValue, "frequency is 0") } - localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHash))) - localStore.Validators = append(localStore.Validators, rh) - netStore := storage.NewNetStore(localStore, nil) - rh.SetStore(netStore) - return rh, nil + timeDiff := current - start + period := timeDiff / frequency + return uint32(period + 1), nil } diff --git a/swarm/storage/mru/resource_sign.go b/swarm/storage/mru/resource_sign.go index c6185a3bbc45..a9f7cb6296ae 100644 --- a/swarm/storage/mru/resource_sign.go +++ b/swarm/storage/mru/resource_sign.go @@ -23,20 +23,44 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) -// Signs resource updates +const signatureLength = 65 + +// Signature is an alias for a static byte array with the 
size of a signature +type Signature [signatureLength]byte + +// Signer signs Mutable Resource update payloads type Signer interface { Sign(common.Hash) (Signature, error) + Address() common.Address } +// GenericSigner implements the Signer interface +// It is the vanilla signer that probably should be used in most cases type GenericSigner struct { PrivKey *ecdsa.PrivateKey + address common.Address } -func (self *GenericSigner) Sign(data common.Hash) (signature Signature, err error) { - signaturebytes, err := crypto.Sign(data.Bytes(), self.PrivKey) +// NewGenericSigner builds a signer that will sign everything with the provided private key +func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner { + return &GenericSigner{ + PrivKey: privKey, + address: crypto.PubkeyToAddress(privKey.PublicKey), + } +} + +// Sign signs the supplied data +// It wraps the ethereum crypto.Sign() method +func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) { + signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey) if err != nil { return } copy(signature[:], signaturebytes) return } + +// PublicKey returns the public key of the signer's private key +func (s *GenericSigner) Address() common.Address { + return s.address +} diff --git a/swarm/storage/mru/resource_test.go b/swarm/storage/mru/resource_test.go index aa1860359d37..76d7c58a1e89 100644 --- a/swarm/storage/mru/resource_test.go +++ b/swarm/storage/mru/resource_test.go @@ -22,71 +22,110 @@ import ( "crypto/rand" "encoding/binary" "flag" - "fmt" "io/ioutil" - "math/big" "os" - "strings" "testing" "time" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/contracts/ens" - "github.com/ethereum/go-ethereum/contracts/ens/contract" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/storage" ) var ( - loglevel = flag.Int("loglevel", 3, "loglevel") - testHasher = storage.MakeHashFunc(storage.SHA3Hash)() - zeroAddr = common.Address{} - startBlock = uint64(4200) + loglevel = flag.Int("loglevel", 3, "loglevel") + testHasher = storage.MakeHashFunc(resourceHashAlgorithm)() + startTime = Timestamp{ + Time: uint64(4200), + } resourceFrequency = uint64(42) cleanF func() - domainName = "føø.bar" - safeName string - nameHash common.Hash + resourceName = "føø.bar" hashfunc = storage.MakeHashFunc(storage.DefaultHash) ) func init() { - var err error flag.Parse() log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) - safeName, err = ToSafeName(domainName) - if err != nil { - panic(err) - } - nameHash = ens.EnsNode(safeName) } -// simulated backend does not have the blocknumber call -// so we use this wrapper to fake returning the block count -type fakeBackend struct { - *backends.SimulatedBackend - blocknumber int64 +// simulated timeProvider +type fakeTimeProvider struct { + currentTime uint64 +} + +func (f *fakeTimeProvider) Tick() { + f.currentTime++ } -func (f *fakeBackend) Commit() { - if f.SimulatedBackend != nil { - f.SimulatedBackend.Commit() +func (f 
*fakeTimeProvider) Now() Timestamp { + return Timestamp{ + Time: f.currentTime, } - f.blocknumber++ } -func (f *fakeBackend) HeaderByNumber(context context.Context, name string, bigblock *big.Int) (*types.Header, error) { - f.blocknumber++ - biggie := big.NewInt(f.blocknumber) - return &types.Header{ - Number: biggie, - }, nil +func TestUpdateChunkSerializationErrorChecking(t *testing.T) { + + // Test that parseUpdate fails if the chunk is too small + var r SignedResourceUpdate + if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1)); err == nil { + t.Fatalf("Expected parseUpdate to fail when chunkData contains less than %d bytes", minimumUpdateDataLength) + } + + r = SignedResourceUpdate{} + // Test that parseUpdate fails when the length header does not match the data array length + fakeChunk := make([]byte, 150) + binary.LittleEndian.PutUint16(fakeChunk, 44) + if err := r.fromChunk(storage.ZeroAddr, fakeChunk); err == nil { + t.Fatal("Expected parseUpdate to fail when the header length does not match the actual data array passed in") + } + + r = SignedResourceUpdate{ + resourceUpdate: resourceUpdate{ + updateHeader: updateHeader{ + UpdateLookup: UpdateLookup{ + + rootAddr: make([]byte, 79), // put the wrong length, should be storage.KeyLength + }, + metaHash: nil, + multihash: false, + }, + }, + } + _, err := r.toChunk() + if err == nil { + t.Fatal("Expected newUpdateChunk to fail when rootAddr or metaHash have the wrong length") + } + r.rootAddr = make([]byte, storage.KeyLength) + r.metaHash = make([]byte, storage.KeyLength) + _, err = r.toChunk() + if err == nil { + t.Fatal("Expected newUpdateChunk to fail when there is no data") + } + r.data = make([]byte, 79) // put some arbitrary length data + _, err = r.toChunk() + if err == nil { + t.Fatal("expected newUpdateChunk to fail when there is no signature", err) + } + + alice := newAliceSigner() + if err := r.Sign(alice); err != nil { + t.Fatalf("error signing:%s", err) + + } + _, err = r.toChunk() + if err != nil { + t.Fatalf("error creating update chunk:%s", err) + } + + r.multihash = true + r.data[1] = 79 // mess with the multihash, corrupting one byte of it. 
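(Editorial aside, not part of the patch.) The alice/bob/charlie signers used throughout these tests are ordinary Signers; with the NewGenericSigner constructor added in resource_sign.go they can presumably be built from fixed private keys along the following lines. The helper name and key handling are illustrative only:

// newTestSigner sketches how a deterministic test signer could be built from a
// hex-encoded private key using the GenericSigner added in this change set.
func newTestSigner(hexKey string) (*GenericSigner, error) {
	privKey, err := crypto.HexToECDSA(hexKey) // parse the illustrative key
	if err != nil {
		return nil, err
	}
	return NewGenericSigner(privKey), nil
}

The patch resumes below.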
+ if err := r.Sign(alice); err == nil { + t.Fatal("expected Sign() to fail when an invalid multihash is in data and multihash=true", err) + } } // check that signature address matches update signer address @@ -95,21 +134,32 @@ func TestReverse(t *testing.T) { period := uint32(4) version := uint32(2) - // signer containing private key - signer, err := newTestSigner() - if err != nil { - t.Fatal(err) + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, } + // signer containing private key + signer := newAliceSigner() + // set up rpc and create resourcehandler - rh, _, teardownTest, err := setupTest(nil, nil, signer) + _, _, teardownTest, err := setupTest(timeProvider, signer) if err != nil { t.Fatal(err) } defer teardownTest() - // generate a hash for block 4200 version 1 - key := rh.resourceHash(period, version, ens.EnsNode(safeName)) + metadata := ResourceMetadata{ + Name: resourceName, + StartTime: startTime, + Frequency: resourceFrequency, + Owner: signer.Address(), + } + + rootAddr, metaHash, _, err := metadata.serializeAndHash() + if err != nil { + t.Fatal(err) + } // generate some bogus data for the chunk and sign it data := make([]byte, 8) @@ -119,21 +169,42 @@ func TestReverse(t *testing.T) { } testHasher.Reset() testHasher.Write(data) - digest := rh.keyDataHash(key, data) - sig, err := rh.signer.Sign(digest) - if err != nil { + + update := &SignedResourceUpdate{ + resourceUpdate: resourceUpdate{ + updateHeader: updateHeader{ + UpdateLookup: UpdateLookup{ + period: period, + version: version, + rootAddr: rootAddr, + }, + metaHash: metaHash, + }, + data: data, + }, + } + // generate a hash for t=4200 version 1 + key := update.UpdateAddr() + + if err = update.Sign(signer); err != nil { t.Fatal(err) } - chunk := newUpdateChunk(key, &sig, period, version, safeName, data, len(data)) + chunk, err := update.toChunk() + if err != nil { + t.Fatal(err) + } // check that we can recover the owner account from the update chunk's signature - checksig, checkperiod, checkversion, checkname, checkdata, _, err := rh.parseUpdate(chunk.SData) + var checkUpdate SignedResourceUpdate + if err := checkUpdate.fromChunk(chunk.Addr, chunk.SData); err != nil { + t.Fatal(err) + } + checkdigest, err := checkUpdate.GetDigest() if err != nil { t.Fatal(err) } - checkdigest := rh.keyDataHash(chunk.Addr, checkdata) - recoveredaddress, err := getAddressFromDataSig(checkdigest, *checksig) + recoveredaddress, err := getOwner(checkdigest, *checkUpdate.signature) if err != nil { t.Fatalf("Retrieve address from signature fail: %v", err) } @@ -147,28 +218,29 @@ func TestReverse(t *testing.T) { if !bytes.Equal(key[:], chunk.Addr[:]) { t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Addr) } - if period != checkperiod { - t.Fatalf("Expected period '%d', was '%d'", period, checkperiod) + if period != checkUpdate.period { + t.Fatalf("Expected period '%d', was '%d'", period, checkUpdate.period) } - if version != checkversion { - t.Fatalf("Expected version '%d', was '%d'", version, checkversion) + if version != checkUpdate.version { + t.Fatalf("Expected version '%d', was '%d'", version, checkUpdate.version) } - if safeName != checkname { - t.Fatalf("Expected name '%s', was '%s'", safeName, checkname) - } - if !bytes.Equal(data, checkdata) { - t.Fatalf("Expectedn data '%x', was '%x'", data, checkdata) + if !bytes.Equal(data, checkUpdate.data) { + t.Fatalf("Expectedn data '%x', was '%x'", data, checkUpdate.data) } } // make updates and retrieve them based on periods and versions 
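TestResourceHandler below walks the full mutable-resource life-cycle (create the metadata chunk, publish signed updates, look them up). Condensed into one helper, and assuming it lives inside package mru (Request fields such as rootAddr and the resource data field are unexported), the flow is roughly the following sketch; the helper name and payload are illustrative only:

package mru

import "context"

// exampleResourceLifecycle condenses the flow exercised by TestResourceHandler:
// publish the metadata ("root") chunk, sign and publish one update, then look it up.
func exampleResourceLifecycle(ctx context.Context, rh *Handler, signer Signer) ([]byte, error) {
	metadata := &ResourceMetadata{
		Name:      "føø.bar",
		Frequency: 42,
		StartTime: TimestampProvider.Now(),
		Owner:     signer.Address(),
	}

	// create and publish the metadata chunk
	request, err := NewCreateUpdateRequest(metadata)
	if err != nil {
		return nil, err
	}
	if err := request.Sign(signer); err != nil {
		return nil, err
	}
	if err := rh.New(ctx, request); err != nil {
		return nil, err
	}

	// first update: set the payload, sign, and publish
	request.SetData([]byte("hello"), false)
	if err := request.Sign(signer); err != nil {
		return nil, err
	}
	if _, err := rh.Update(ctx, &request.SignedResourceUpdate); err != nil {
		return nil, err
	}

	// retrieve the latest version of the resource
	rsrc, err := rh.Lookup(ctx, LookupLatest(request.rootAddr))
	if err != nil {
		return nil, err
	}
	return rsrc.data, nil
}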
-func TestHandler(t *testing.T) { +func TestResourceHandler(t *testing.T) { - // make fake backend, set up rpc and create resourcehandler - backend := &fakeBackend{ - blocknumber: int64(startBlock), + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, } - rh, datadir, teardownTest, err := setupTest(backend, nil, nil) + + // signer containing private key + signer := newAliceSigner() + + rh, datadir, teardownTest, err := setupTest(timeProvider, signer) if err != nil { t.Fatal(err) } @@ -177,24 +249,45 @@ func TestHandler(t *testing.T) { // create a new resource ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rootChunkKey, _, err := rh.New(ctx, safeName, resourceFrequency) + + metadata := &ResourceMetadata{ + Name: resourceName, + Frequency: resourceFrequency, + StartTime: Timestamp{Time: timeProvider.Now().Time}, + Owner: signer.Address(), + } + + request, err := NewCreateUpdateRequest(metadata) + if err != nil { + t.Fatal(err) + } + request.Sign(signer) + if err != nil { + t.Fatal(err) + } + err = rh.New(ctx, request) if err != nil { t.Fatal(err) } - chunk, err := rh.chunkStore.Get(storage.Address(rootChunkKey)) + chunk, err := rh.chunkStore.Get(context.TODO(), storage.Address(request.rootAddr)) if err != nil { t.Fatal(err) } else if len(chunk.SData) < 16 { t.Fatalf("chunk data must be minimum 16 bytes, is %d", len(chunk.SData)) } - startblocknumber := binary.LittleEndian.Uint64(chunk.SData[2:10]) - chunkfrequency := binary.LittleEndian.Uint64(chunk.SData[10:]) - if startblocknumber != uint64(backend.blocknumber) { - t.Fatalf("stored block number %d does not match provided block number %d", startblocknumber, backend.blocknumber) + + var recoveredMetadata ResourceMetadata + + recoveredMetadata.binaryGet(chunk.SData) + if err != nil { + t.Fatal(err) + } + if recoveredMetadata.StartTime.Time != timeProvider.currentTime { + t.Fatalf("stored startTime %d does not match provided startTime %d", recoveredMetadata.StartTime.Time, timeProvider.currentTime) } - if chunkfrequency != resourceFrequency { - t.Fatalf("stored frequency %d does not match provided frequency %d", chunkfrequency, resourceFrequency) + if recoveredMetadata.Frequency != resourceFrequency { + t.Fatalf("stored frequency %d does not match provided frequency %d", recoveredMetadata.Frequency, resourceFrequency) } // data for updates: @@ -205,232 +298,273 @@ func TestHandler(t *testing.T) { "clyde", } - // update halfway to first period + // update halfway to first period. 
period=1, version=1 resourcekey := make(map[string]storage.Address) - fwdBlocks(int(resourceFrequency/2), backend) + fwdClock(int(resourceFrequency/2), timeProvider) data := []byte(updates[0]) - resourcekey[updates[0]], err = rh.Update(ctx, safeName, data) + request.SetData(data, false) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[0]], err = rh.Update(ctx, &request.SignedResourceUpdate) if err != nil { t.Fatal(err) } - // update on first period - fwdBlocks(int(resourceFrequency/2), backend) + // update on first period with version = 1 to make it fail since there is already one update with version=1 + request, err = rh.NewUpdateRequest(ctx, request.rootAddr) + if err != nil { + t.Fatal(err) + } + if request.version != 2 || request.period != 1 { + t.Fatal("Suggested period should be 1 and version should be 2") + } + + request.version = 1 // force version 1 instead of 2 to make it fail data = []byte(updates[1]) - resourcekey[updates[1]], err = rh.Update(ctx, safeName, data) + request.SetData(data, false) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate) + if err == nil { + t.Fatal("Expected update to fail since this version already exists") + } + + // update on second period with version = 1, correct. period=2, version=1 + fwdClock(int(resourceFrequency/2), timeProvider) + request, err = rh.NewUpdateRequest(ctx, request.rootAddr) + if err != nil { + t.Fatal(err) + } + request.SetData(data, false) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate) if err != nil { t.Fatal(err) } - // update on second period - fwdBlocks(int(resourceFrequency), backend) + fwdClock(int(resourceFrequency), timeProvider) + // Update on third period, with version = 1 + request, err = rh.NewUpdateRequest(ctx, request.rootAddr) + if err != nil { + t.Fatal(err) + } data = []byte(updates[2]) - resourcekey[updates[2]], err = rh.Update(ctx, safeName, data) + request.SetData(data, false) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[2]], err = rh.Update(ctx, &request.SignedResourceUpdate) if err != nil { t.Fatal(err) } - // update just after second period - fwdBlocks(1, backend) + // update just after third period + fwdClock(1, timeProvider) + request, err = rh.NewUpdateRequest(ctx, request.rootAddr) + if err != nil { + t.Fatal(err) + } + if request.period != 3 || request.version != 2 { + t.Fatal("Suggested period should be 3 and version should be 2") + } data = []byte(updates[3]) - resourcekey[updates[3]], err = rh.Update(ctx, safeName, data) + request.SetData(data, false) + + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[3]], err = rh.Update(ctx, &request.SignedResourceUpdate) if err != nil { t.Fatal(err) } + time.Sleep(time.Second) rh.Close() // check we can retrieve the updates after close - // it will match on second iteration startblocknumber + (resourceFrequency * 3) - fwdBlocks(int(resourceFrequency*2)-1, backend) + // it will match on second iteration startTime + (resourceFrequency * 3) + fwdClock(int(resourceFrequency*2)-1, timeProvider) - rhparams := &HandlerParams{ - QueryMaxPeriods: &LookupParams{ - Limit: false, - }, - Signer: nil, - HeaderGetter: rh.headerGetter, - } + rhparams := &HandlerParams{} rh2, err := NewTestHandler(datadir, rhparams) if err != nil { t.Fatal(err) } - rsrc2, err := 
rh2.Load(rootChunkKey) - _, err = rh2.LookupLatest(ctx, nameHash, true, nil) + + rsrc2, err := rh2.Load(context.TODO(), request.rootAddr) + if err != nil { + t.Fatal(err) + } + + _, err = rh2.Lookup(ctx, LookupLatest(request.rootAddr)) if err != nil { t.Fatal(err) } - // last update should be "clyde", version two, blockheight startblocknumber + (resourcefrequency * 3) + // last update should be "clyde", version two, time= startTime + (resourcefrequency * 3) if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) { - t.Fatalf("resource data was %v, expected %v", rsrc2.data, updates[len(updates)-1]) + t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1]) } if rsrc2.version != 2 { t.Fatalf("resource version was %d, expected 2", rsrc2.version) } - if rsrc2.lastPeriod != 3 { - t.Fatalf("resource period was %d, expected 3", rsrc2.lastPeriod) + if rsrc2.period != 3 { + t.Fatalf("resource period was %d, expected 3", rsrc2.period) } - log.Debug("Latest lookup", "period", rsrc2.lastPeriod, "version", rsrc2.version, "data", rsrc2.data) + log.Debug("Latest lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data) - // specific block, latest version - rsrc, err := rh2.LookupHistorical(ctx, nameHash, 3, true, rh2.queryMaxPeriods) + // specific period, latest version + rsrc, err := rh2.Lookup(ctx, LookupLatestVersionInPeriod(request.rootAddr, 3)) if err != nil { t.Fatal(err) } // check data if !bytes.Equal(rsrc.data, []byte(updates[len(updates)-1])) { - t.Fatalf("resource data (historical) was %v, expected %v", rsrc2.data, updates[len(updates)-1]) + t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[len(updates)-1]) } - log.Debug("Historical lookup", "period", rsrc2.lastPeriod, "version", rsrc2.version, "data", rsrc2.data) + log.Debug("Historical lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data) - // specific block, specific version - rsrc, err = rh2.LookupVersion(ctx, nameHash, 3, 1, true, rh2.queryMaxPeriods) + // specific period, specific version + lookupParams := LookupVersion(request.rootAddr, 3, 1) + rsrc, err = rh2.Lookup(ctx, lookupParams) if err != nil { t.Fatal(err) } // check data if !bytes.Equal(rsrc.data, []byte(updates[2])) { - t.Fatalf("resource data (historical) was %v, expected %v", rsrc2.data, updates[2]) + t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2]) } - log.Debug("Specific version lookup", "period", rsrc2.lastPeriod, "version", rsrc2.version, "data", rsrc2.data) + log.Debug("Specific version lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data) // we are now at third update // check backwards stepping to the first for i := 1; i >= 0; i-- { - rsrc, err := rh2.LookupPreviousByName(ctx, safeName, rh2.queryMaxPeriods) + rsrc, err := rh2.LookupPrevious(ctx, lookupParams) if err != nil { t.Fatal(err) } if !bytes.Equal(rsrc.data, []byte(updates[i])) { - t.Fatalf("resource data (previous) was %v, expected %v", rsrc2.data, updates[i]) + t.Fatalf("resource data (previous) was %v, expected %v", rsrc.data, updates[i]) } } // beyond the first should yield an error - rsrc, err = rh2.LookupPreviousByName(ctx, safeName, rh2.queryMaxPeriods) + rsrc, err = rh2.LookupPrevious(ctx, lookupParams) if err == nil { - t.Fatalf("expeected previous to fail, returned period %d version %d data %v", rsrc2.lastPeriod, rsrc2.version, rsrc2.data) + t.Fatalf("expected previous to fail, returned period %d version %d data 
%v", rsrc.period, rsrc.version, rsrc.data) } } -// create ENS enabled resource update, with and without valid owner -func TestENSOwner(t *testing.T) { +func TestMultihash(t *testing.T) { + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } // signer containing private key - signer, err := newTestSigner() + signer := newAliceSigner() + + // set up rpc and create resourcehandler + rh, datadir, teardownTest, err := setupTest(timeProvider, signer) if err != nil { t.Fatal(err) } + defer teardownTest() - // ens address and transact options - addr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) - transactOpts := bind.NewKeyedTransactor(signer.PrivKey) + // create a new resource + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // set up ENS sim - domainparts := strings.Split(safeName, ".") - contractAddr, contractbackend, err := setupENS(addr, transactOpts, domainparts[0], domainparts[1]) - if err != nil { - t.Fatal(err) + metadata := &ResourceMetadata{ + Name: resourceName, + Frequency: resourceFrequency, + StartTime: Timestamp{Time: timeProvider.Now().Time}, + Owner: signer.Address(), } - ensClient, err := ens.NewENS(transactOpts, contractAddr, contractbackend) + mr, err := NewCreateRequest(metadata) if err != nil { t.Fatal(err) } - - // set up rpc and create resourcehandler with ENS sim backend - rh, _, teardownTest, err := setupTest(contractbackend, ensClient, signer) + err = rh.New(ctx, mr) if err != nil { t.Fatal(err) } - defer teardownTest() - // create new resource when we are owner = ok - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - _, _, err = rh.New(ctx, safeName, resourceFrequency) + // we're naïvely assuming keccak256 for swarm hashes + // if it ever changes this test should also change + multihashbytes := ens.EnsNode("foo") + multihashmulti := multihash.ToMultihash(multihashbytes.Bytes()) if err != nil { - t.Fatalf("Create resource fail: %v", err) + t.Fatal(err) } - - data := []byte("foo") - // update resource when we are owner = ok - _, err = rh.Update(ctx, safeName, data) + mr.SetData(multihashmulti, true) + mr.Sign(signer) if err != nil { - t.Fatalf("Update resource fail: %v", err) + t.Fatal(err) } - - // update resource when we are not owner = !ok - signertwo, err := newTestSigner() + multihashkey, err := rh.Update(ctx, &mr.SignedResourceUpdate) if err != nil { t.Fatal(err) } - rh.signer = signertwo - _, err = rh.Update(ctx, safeName, data) - if err == nil { - t.Fatalf("Expected resource update fail due to owner mismatch") - } -} - -func TestMultihash(t *testing.T) { - // signer containing private key - signer, err := newTestSigner() + sha1bytes := make([]byte, multihash.MultihashLength) + sha1multi := multihash.ToMultihash(sha1bytes) if err != nil { t.Fatal(err) } - - // make fake backend, set up rpc and create resourcehandler - backend := &fakeBackend{ - blocknumber: int64(startBlock), + mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr) + if err != nil { + t.Fatal(err) } - - // set up rpc and create resourcehandler - rh, datadir, teardownTest, err := setupTest(backend, nil, nil) + mr.SetData(sha1multi, true) + mr.Sign(signer) if err != nil { t.Fatal(err) } - defer teardownTest() - - // create a new resource - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - _, _, err = rh.New(ctx, safeName, resourceFrequency) + sha1key, err := rh.Update(ctx, &mr.SignedResourceUpdate) if err != nil { t.Fatal(err) } - // we're naïvely assuming keccak256 for swarm 
hashes - // if it ever changes this test should also change - multihashbytes := ens.EnsNode("foo") - multihashmulti := multihash.ToMultihash(multihashbytes.Bytes()) - multihashkey, err := rh.UpdateMultihash(ctx, safeName, multihashmulti) + // invalid multihashes + mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr) if err != nil { t.Fatal(err) } - - sha1bytes := make([]byte, multihash.MultihashLength) - sha1multi := multihash.ToMultihash(sha1bytes) - sha1key, err := rh.UpdateMultihash(ctx, safeName, sha1multi) + mr.SetData(multihashmulti[1:], true) + mr.Sign(signer) if err != nil { t.Fatal(err) } - - // invalid multihashes - _, err = rh.UpdateMultihash(ctx, safeName, multihashmulti[1:]) + _, err = rh.Update(ctx, &mr.SignedResourceUpdate) if err == nil { t.Fatalf("Expected update to fail with first byte skipped") } - _, err = rh.UpdateMultihash(ctx, safeName, multihashmulti[:len(multihashmulti)-2]) + mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr) + if err != nil { + t.Fatal(err) + } + mr.SetData(multihashmulti[:len(multihashmulti)-2], true) + mr.Sign(signer) + if err != nil { + t.Fatal(err) + } + + _, err = rh.Update(ctx, &mr.SignedResourceUpdate) if err == nil { t.Fatalf("Expected update to fail with last byte skipped") } - data, err := getUpdateDirect(rh, multihashkey) + data, err := getUpdateDirect(rh.Handler, multihashkey) if err != nil { t.Fatal(err) } @@ -441,7 +575,7 @@ func TestMultihash(t *testing.T) { if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) { t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes()) } - data, err = getUpdateDirect(rh, sha1key) + data, err = getUpdateDirect(rh.Handler, sha1key) if err != nil { t.Fatal(err) } @@ -454,33 +588,48 @@ func TestMultihash(t *testing.T) { } rh.Close() - rhparams := &HandlerParams{ - QueryMaxPeriods: &LookupParams{ - Limit: false, - }, - Signer: signer, - HeaderGetter: rh.headerGetter, - OwnerValidator: rh.ownerValidator, - } + rhparams := &HandlerParams{} // test with signed data rh2, err := NewTestHandler(datadir, rhparams) if err != nil { t.Fatal(err) } - _, _, err = rh2.New(ctx, safeName, resourceFrequency) + mr, err = NewCreateRequest(metadata) + if err != nil { + t.Fatal(err) + } + err = rh2.New(ctx, mr) if err != nil { t.Fatal(err) } - multihashsignedkey, err := rh2.UpdateMultihash(ctx, safeName, multihashmulti) + + mr.SetData(multihashmulti, true) + mr.Sign(signer) + if err != nil { t.Fatal(err) } - sha1signedkey, err := rh2.UpdateMultihash(ctx, safeName, sha1multi) + multihashsignedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate) if err != nil { t.Fatal(err) } - data, err = getUpdateDirect(rh2, multihashsignedkey) + mr, err = rh2.NewUpdateRequest(ctx, mr.rootAddr) + if err != nil { + t.Fatal(err) + } + mr.SetData(sha1multi, true) + mr.Sign(signer) + if err != nil { + t.Fatal(err) + } + + sha1signedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate) + if err != nil { + t.Fatal(err) + } + + data, err = getUpdateDirect(rh2.Handler, multihashsignedkey) if err != nil { t.Fatal(err) } @@ -491,7 +640,7 @@ func TestMultihash(t *testing.T) { if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) { t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes()) } - data, err = getUpdateDirect(rh2, sha1signedkey) + data, err = getUpdateDirect(rh2.Handler, sha1signedkey) if err != nil { t.Fatal(err) } @@ -504,63 +653,95 @@ func TestMultihash(t *testing.T) { } } -func TestChunkValidator(t *testing.T) { - // signer containing private 
key - signer, err := newTestSigner() - if err != nil { - t.Fatal(err) +// \TODO verify testing of signature validation and enforcement +func TestValidator(t *testing.T) { + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, } - // ens address and transact options - addr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) - transactOpts := bind.NewKeyedTransactor(signer.PrivKey) + // signer containing private key. Alice will be the good girl + signer := newAliceSigner() - // set up ENS sim - domainparts := strings.Split(safeName, ".") - contractAddr, contractbackend, err := setupENS(addr, transactOpts, domainparts[0], domainparts[1]) - if err != nil { - t.Fatal(err) - } + // fake signer for false results. Bob will play the bad guy today. + falseSigner := newBobSigner() - ensClient, err := ens.NewENS(transactOpts, contractAddr, contractbackend) + // set up sim timeProvider + rh, _, teardownTest, err := setupTest(timeProvider, signer) if err != nil { t.Fatal(err) } + defer teardownTest() - // set up rpc and create resourcehandler with ENS sim backend - rh, _, teardownTest, err := setupTest(contractbackend, ensClient, signer) + // create new resource + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + metadata := &ResourceMetadata{ + Name: resourceName, + Frequency: resourceFrequency, + StartTime: Timestamp{Time: timeProvider.Now().Time}, + Owner: signer.Address(), + } + mr, err := NewCreateRequest(metadata) if err != nil { t.Fatal(err) } - defer teardownTest() + mr.Sign(signer) - // create new resource when we are owner = ok - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - key, rsrc, err := rh.New(ctx, safeName, resourceFrequency) + err = rh.New(ctx, mr) if err != nil { t.Fatalf("Create resource fail: %v", err) } + // chunk with address data := []byte("foo") - key = rh.resourceHash(1, 1, rsrc.nameHash) - digest := rh.keyDataHash(key, data) - sig, err := rh.signer.Sign(digest) - if err != nil { + mr.SetData(data, false) + if err := mr.Sign(signer); err != nil { t.Fatalf("sign fail: %v", err) } - chunk := newUpdateChunk(key, &sig, 1, 1, safeName, data, len(data)) + chunk, err := mr.SignedResourceUpdate.toChunk() + if err != nil { + t.Fatal(err) + } if !rh.Validate(chunk.Addr, chunk.SData) { t.Fatal("Chunk validator fail on update chunk") } + // chunk with address made from different publickey + if err := mr.Sign(falseSigner); err == nil { + t.Fatalf("Expected Sign to fail since we are using a different OwnerAddr: %v", err) + } + + // chunk with address made from different publickey + mr.metadata.Owner = zeroAddr // set to zero to bypass .Sign() check + if err := mr.Sign(falseSigner); err != nil { + t.Fatalf("sign fail: %v", err) + } + + chunk, err = mr.SignedResourceUpdate.toChunk() + if err != nil { + t.Fatal(err) + } + + if rh.Validate(chunk.Addr, chunk.SData) { + t.Fatal("Chunk validator did not fail on update chunk with false address") + } + ctx, cancel = context.WithTimeout(context.Background(), time.Second) defer cancel() - startBlock, err := rh.getBlock(ctx, safeName) + + metadata = &ResourceMetadata{ + Name: resourceName, + StartTime: TimestampProvider.Now(), + Frequency: resourceFrequency, + Owner: signer.Address(), + } + chunk, _, err = metadata.newChunk() if err != nil { t.Fatal(err) } - chunk = rh.newMetaChunk(safeName, startBlock, resourceFrequency) + if !rh.Validate(chunk.Addr, chunk.SData) { t.Fatal("Chunk validator fail on metadata chunk") } @@ -568,8 +749,17 @@ func 
TestChunkValidator(t *testing.T) { // tests that the content address validator correctly checks the data // tests that resource update chunks are passed through content address validator -// the test checking the resouce update validator internal correctness is found in resource_test.go -func TestValidator(t *testing.T) { +// there is some redundancy in this test as it also tests content addressed chunks, +// which should be evaluated as invalid chunks by this validator +func TestValidatorInStore(t *testing.T) { + + // make fake timeProvider + TimestampProvider = &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() // set up localstore datadir, err := ioutil.TempDir("", "storage-testresourcevalidator") @@ -585,83 +775,80 @@ func TestValidator(t *testing.T) { t.Fatal(err) } - // add content address validator and resource validator to validators and check puts - // bad should fail, good should pass - store.Validators = append(store.Validators, storage.NewContentAddressValidator(hashfunc)) + // set up resource handler and add is as a validator to the localstore rhParams := &HandlerParams{} - rh, err := NewHandler(rhParams) - if err != nil { - t.Fatal(err) - } + rh := NewHandler(rhParams) store.Validators = append(store.Validators, rh) - chunks := storage.GenerateRandomChunks(storage.DefaultChunkSize, 2) + // create content addressed chunks, one good, one faulty + chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) goodChunk := chunks[0] badChunk := chunks[1] badChunk.SData = goodChunk.SData - key := rh.resourceHash(42, 1, ens.EnsNode("xyzzy.eth")) - data := []byte("bar") - uglyChunk := newUpdateChunk(key, nil, 42, 1, "xyzzy.eth", data, len(data)) - storage.PutChunks(store, goodChunk, badChunk, uglyChunk) - if err := goodChunk.GetErrored(); err != nil { - t.Fatalf("expected no error on good content address chunk with both validators, but got: %s", err) + metadata := &ResourceMetadata{ + StartTime: startTime, + Name: "xyzzy", + Frequency: resourceFrequency, + Owner: signer.Address(), } - if err := badChunk.GetErrored(); err == nil { - t.Fatal("expected error on bad chunk address with both validators, but got nil") + + rootChunk, metaHash, err := metadata.newChunk() + if err != nil { + t.Fatal(err) } - if err := uglyChunk.GetErrored(); err != nil { - t.Fatalf("expected no error on resource update chunk with both validators, but got: %s", err) + // create a resource update chunk with correct publickey + updateLookup := UpdateLookup{ + period: 42, + version: 1, + rootAddr: rootChunk.Addr, } - // (redundant check) - // use only resource validator, and check puts - // bad should fail, good should fail, resource should pass - store.Validators[0] = store.Validators[1] - store.Validators = store.Validators[:1] + updateAddr := updateLookup.UpdateAddr() + data := []byte("bar") - chunks = storage.GenerateRandomChunks(storage.DefaultChunkSize, 2) - goodChunk = chunks[0] - badChunk = chunks[1] - badChunk.SData = goodChunk.SData + r := SignedResourceUpdate{ + updateAddr: updateAddr, + resourceUpdate: resourceUpdate{ + updateHeader: updateHeader{ + UpdateLookup: updateLookup, + metaHash: metaHash, + }, + data: data, + }, + } - key = rh.resourceHash(42, 2, ens.EnsNode("xyzzy.eth")) - data = []byte("baz") - uglyChunk = newUpdateChunk(key, nil, 42, 2, "xyzzy.eth", data, len(data)) + r.Sign(signer) - storage.PutChunks(store, goodChunk, badChunk, uglyChunk) + uglyChunk, err := r.toChunk() + if err != nil { + t.Fatal(err) + } + + // put the 
chunks in the store and check their error status + storage.PutChunks(store, goodChunk) if goodChunk.GetErrored() == nil { t.Fatal("expected error on good content address chunk with resource validator only, but got nil") } + storage.PutChunks(store, badChunk) if badChunk.GetErrored() == nil { t.Fatal("expected error on bad content address chunk with resource validator only, but got nil") } + storage.PutChunks(store, uglyChunk) if err := uglyChunk.GetErrored(); err != nil { t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err) } } -// fast-forward blockheight -func fwdBlocks(count int, backend *fakeBackend) { +// fast-forward clock +func fwdClock(count int, timeProvider *fakeTimeProvider) { for i := 0; i < count; i++ { - backend.Commit() - } -} - -type ensOwnerValidator struct { - *ens.ENS -} - -func (e ensOwnerValidator) ValidateOwner(name string, address common.Address) (bool, error) { - addr, err := e.Owner(ens.EnsNode(name)) - if err != nil { - return false, err + timeProvider.Tick() } - return address == addr, nil } // create rpc and resourcehandler -func setupTest(backend headerGetter, ensBackend *ens.ENS, signer Signer) (rh *Handler, datadir string, teardown func(), err error) { +func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) { var fsClean func() var rpcClean func() @@ -683,84 +870,35 @@ func setupTest(backend headerGetter, ensBackend *ens.ENS, signer Signer) (rh *Ha os.RemoveAll(datadir) } - var ov ownerValidator - if ensBackend != nil { - ov = ensOwnerValidator{ensBackend} - } - - rhparams := &HandlerParams{ - QueryMaxPeriods: &LookupParams{ - Limit: false, - }, - Signer: signer, - HeaderGetter: backend, - OwnerValidator: ov, - } + TimestampProvider = timeProvider + rhparams := &HandlerParams{} rh, err = NewTestHandler(datadir, rhparams) return rh, datadir, cleanF, err } -// Set up simulated ENS backend for use with ENSHandler tests -func setupENS(addr common.Address, transactOpts *bind.TransactOpts, sub string, top string) (common.Address, *fakeBackend, error) { - - // create the domain hash values to pass to the ENS contract methods - var tophash [32]byte - var subhash [32]byte - - testHasher.Reset() - testHasher.Write([]byte(top)) - copy(tophash[:], testHasher.Sum(nil)) - testHasher.Reset() - testHasher.Write([]byte(sub)) - copy(subhash[:], testHasher.Sum(nil)) - - // initialize contract backend and deploy - contractBackend := &fakeBackend{ - SimulatedBackend: backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}), - } - - contractAddress, _, ensinstance, err := contract.DeployENS(transactOpts, contractBackend) - if err != nil { - return zeroAddr, nil, fmt.Errorf("can't deploy: %v", err) - } - - // update the registry for the correct owner address - if _, err = ensinstance.SetOwner(transactOpts, [32]byte{}, addr); err != nil { - return zeroAddr, nil, fmt.Errorf("can't setowner: %v", err) - } - contractBackend.Commit() - - if _, err = ensinstance.SetSubnodeOwner(transactOpts, [32]byte{}, tophash, addr); err != nil { - return zeroAddr, nil, fmt.Errorf("can't register top: %v", err) - } - contractBackend.Commit() - - if _, err = ensinstance.SetSubnodeOwner(transactOpts, ens.EnsNode(top), subhash, addr); err != nil { - return zeroAddr, nil, fmt.Errorf("can't register top: %v", err) - } - contractBackend.Commit() +func newAliceSigner() *GenericSigner { + privKey, _ := 
crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + return NewGenericSigner(privKey) +} - return contractAddress, contractBackend, nil +func newBobSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca") + return NewGenericSigner(privKey) } -func newTestSigner() (*GenericSigner, error) { - privKey, err := crypto.GenerateKey() - if err != nil { - return nil, err - } - return &GenericSigner{ - PrivKey: privKey, - }, nil +func newCharlieSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca") + return NewGenericSigner(privKey) } func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) { - chunk, err := rh.chunkStore.Get(addr) + chunk, err := rh.chunkStore.Get(context.TODO(), addr) if err != nil { return nil, err } - _, _, _, _, data, _, err := rh.parseUpdate(chunk.SData) - if err != nil { + var r SignedResourceUpdate + if err := r.fromChunk(addr, chunk.SData); err != nil { return nil, err } - return data, nil + return r.data, nil } diff --git a/swarm/storage/mru/signedupdate.go b/swarm/storage/mru/signedupdate.go new file mode 100644 index 000000000000..1c6d02e82aad --- /dev/null +++ b/swarm/storage/mru/signedupdate.go @@ -0,0 +1,184 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package mru + +import ( + "bytes" + "hash" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// SignedResourceUpdate represents a resource update with all the necessary information to prove ownership of the resource +type SignedResourceUpdate struct { + resourceUpdate // actual content that will be put on the chunk, less signature + signature *Signature + updateAddr storage.Address // resulting chunk address for the update (not serialized, for internal use) + binaryData []byte // resulting serialized data (not serialized, for efficiency/internal use) +} + +// Verify checks that signatures are valid and that the signer owns the resource to be updated +func (r *SignedResourceUpdate) Verify() (err error) { + if len(r.data) == 0 { + return NewError(ErrInvalidValue, "Update does not contain data") + } + if r.signature == nil { + return NewError(ErrInvalidSignature, "Missing signature field") + } + + digest, err := r.GetDigest() + if err != nil { + return err + } + + // get the address of the signer (which also checks that it's a valid signature) + ownerAddr, err := getOwner(digest, *r.signature) + if err != nil { + return err + } + + if !bytes.Equal(r.updateAddr, r.UpdateAddr()) { + return NewError(ErrInvalidSignature, "Signature address does not match with ownerAddr") + } + + // Check if who signed the resource update really owns the resource + if !verifyOwner(ownerAddr, r.metaHash, r.rootAddr) { + return NewErrorf(ErrUnauthorized, "signature is valid but signer does not own the resource: %v", err) + } + + return nil +} + +// Sign executes the signature to validate the resource +func (r *SignedResourceUpdate) Sign(signer Signer) error { + + r.binaryData = nil //invalidate serialized data + digest, err := r.GetDigest() // computes digest and serializes into .binaryData + if err != nil { + return err + } + + signature, err := signer.Sign(digest) + if err != nil { + return err + } + + // Although the Signer interface returns the public address of the signer, + // recover it from the signature to see if they match + ownerAddress, err := getOwner(digest, signature) + if err != nil { + return NewError(ErrInvalidSignature, "Error verifying signature") + } + + if ownerAddress != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign! + return NewError(ErrInvalidSignature, "Signer address does not match ownerAddr") + } + + r.signature = &signature + r.updateAddr = r.UpdateAddr() + return nil +} + +// create an update chunk. +func (r *SignedResourceUpdate) toChunk() (*storage.Chunk, error) { + + // Check that the update is signed and serialized + // For efficiency, data is serialized during signature and cached in + // the binaryData field when computing the signature digest in .getDigest() + if r.signature == nil || r.binaryData == nil { + return nil, NewError(ErrInvalidSignature, "newUpdateChunk called without a valid signature or payload data. Call .Sign() first.") + } + + chunk := storage.NewChunk(r.updateAddr, nil) + resourceUpdateLength := r.resourceUpdate.binaryLength() + chunk.SData = r.binaryData + + // signature is the last item in the chunk data + copy(chunk.SData[resourceUpdateLength:], r.signature[:]) + + chunk.Size = int64(len(chunk.SData)) + return chunk, nil +} + +// fromChunk populates this structure from chunk data. It does not verify the signature is valid. 
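// For reference, the serialized chunk that toChunk() above produces and fromChunk() below
// parses is laid out as follows (the field encodings live in update.go and updateheader.go):
//
//   2 bytes              updateHeaderLength
//   2 bytes              data length
//   updateHeaderLength   updateHeader: UpdateLookup, then metaHash, then a flags byte
//   data length          payload data
//   signatureLength      signature (r, s, v), written at offset resourceUpdate.binaryLength()
//
// GetDigest() hashes everything before the signature, so the signature commits to the
// lookup key, the metaHash and the payload in one go.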
+func (r *SignedResourceUpdate) fromChunk(updateAddr storage.Address, chunkdata []byte) error { + // for update chunk layout see SignedResourceUpdate definition + + //deserialize the resource update portion + if err := r.resourceUpdate.binaryGet(chunkdata); err != nil { + return err + } + + // Extract the signature + var signature *Signature + cursor := r.resourceUpdate.binaryLength() + sigdata := chunkdata[cursor : cursor+signatureLength] + if len(sigdata) > 0 { + signature = &Signature{} + copy(signature[:], sigdata) + } + + r.signature = signature + r.updateAddr = updateAddr + r.binaryData = chunkdata + + return nil + +} + +// GetDigest creates the resource update digest used in signatures (formerly known as keyDataHash) +// the serialized payload is cached in .binaryData +func (r *SignedResourceUpdate) GetDigest() (result common.Hash, err error) { + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + dataLength := r.resourceUpdate.binaryLength() + if r.binaryData == nil { + r.binaryData = make([]byte, dataLength+signatureLength) + if err := r.resourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil { + return result, err + } + } + hasher.Write(r.binaryData[:dataLength]) //everything except the signature. + + return common.BytesToHash(hasher.Sum(nil)), nil +} + +// getOwner extracts the address of the resource update signer +func getOwner(digest common.Hash, signature Signature) (common.Address, error) { + pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) + if err != nil { + return common.Address{}, err + } + return crypto.PubkeyToAddress(*pub), nil +} + +// verifyResourceOwnerhsip checks that the signer of the update actually owns the resource +// H(ownerAddr, metaHash) is computed. If it matches the rootAddr the update chunk is claiming +// to update, it is proven that signer of the resource update owns the resource. +// See metadataHash in metadata.go for a more detailed explanation +func verifyOwner(ownerAddr common.Address, metaHash []byte, rootAddr storage.Address) bool { + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(metaHash) + hasher.Write(ownerAddr.Bytes()) + rootAddr2 := hasher.Sum(nil) + return bytes.Equal(rootAddr2, rootAddr) +} diff --git a/swarm/storage/mru/testutil.go b/swarm/storage/mru/testutil.go new file mode 100644 index 000000000000..6efcba9aba3a --- /dev/null +++ b/swarm/storage/mru/testutil.go @@ -0,0 +1,53 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
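The ownership check in verifyOwner above reduces to a single hash comparison. A minimal equivalent, under the assumption that the resource hasher behind hashPool is Keccak-256 (the swarm default); the function name is illustrative, not part of the patch:

package mru

import (
	"bytes"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// ownsResource mirrors verifyOwner: the root address of a resource is the hash of the
// metadata hash followed by the owner address, so whoever can produce a matching
// (metaHash, ownerAddr) pair for rootAddr is proven to own the resource.
func ownsResource(ownerAddr common.Address, metaHash []byte, rootAddr storage.Address) bool {
	return bytes.Equal(crypto.Keccak256(metaHash, ownerAddr.Bytes()), rootAddr)
}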
+ +package mru + +import ( + "fmt" + "path/filepath" + + "github.com/ethereum/go-ethereum/swarm/storage" +) + +const ( + testDbDirName = "mru" +) + +type TestHandler struct { + *Handler +} + +func (t *TestHandler) Close() { + t.chunkStore.Close() +} + +// NewTestHandler creates Handler object to be used for testing purposes. +func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) { + path := filepath.Join(datadir, testDbDirName) + rh := NewHandler(params) + localstoreparams := storage.NewDefaultLocalStoreParams() + localstoreparams.Init(path) + localStore, err := storage.NewLocalStore(localstoreparams, nil) + if err != nil { + return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err) + } + localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHashAlgorithm))) + localStore.Validators = append(localStore.Validators, rh) + netStore := storage.NewNetStore(localStore, nil) + rh.SetStore(netStore) + return &TestHandler{rh}, nil +} diff --git a/swarm/storage/mru/timestampprovider.go b/swarm/storage/mru/timestampprovider.go new file mode 100644 index 000000000000..f483491aa1cf --- /dev/null +++ b/swarm/storage/mru/timestampprovider.go @@ -0,0 +1,71 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
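The tests above replace the package clock by assigning the exported TimestampProvider variable defined in timestampprovider.go below. Any type with a Now() Timestamp method will do; a minimal fixed clock, for illustration (the type name is made up, in the spirit of fakeTimeProvider in resource_test.go):

package mru

// fixedClock is an illustrative timestampProvider that always reports the same time.
type fixedClock struct {
	t uint64
}

// Now returns the configured timestamp; it satisfies the timestampProvider interface.
func (c *fixedClock) Now() Timestamp {
	return Timestamp{Time: c.t}
}

// Usage, e.g. at the start of a test:
//
//   TimestampProvider = &fixedClock{t: 4200}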
+ +package mru + +import ( + "encoding/binary" + "time" +) + +// TimestampProvider sets the time source of the mru package +var TimestampProvider timestampProvider = NewDefaultTimestampProvider() + +// Encodes a point in time as a Unix epoch +type Timestamp struct { + Time uint64 // Unix epoch timestamp, in seconds +} + +// 8 bytes uint64 Time +const timestampLength = 8 + +// timestampProvider interface describes a source of timestamp information +type timestampProvider interface { + Now() Timestamp // returns the current timestamp information +} + +// binaryGet populates the timestamp structure from the given byte slice +func (t *Timestamp) binaryGet(data []byte) error { + if len(data) != timestampLength { + return NewError(ErrCorruptData, "timestamp data has the wrong size") + } + t.Time = binary.LittleEndian.Uint64(data[:8]) + return nil +} + +// binaryPut Serializes a Timestamp to a byte slice +func (t *Timestamp) binaryPut(data []byte) error { + if len(data) != timestampLength { + return NewError(ErrCorruptData, "timestamp data has the wrong size") + } + binary.LittleEndian.PutUint64(data, t.Time) + return nil +} + +type DefaultTimestampProvider struct { +} + +// NewDefaultTimestampProvider creates a system clock based timestamp provider +func NewDefaultTimestampProvider() *DefaultTimestampProvider { + return &DefaultTimestampProvider{} +} + +// Now returns the current time according to this provider +func (dtp *DefaultTimestampProvider) Now() Timestamp { + return Timestamp{ + Time: uint64(time.Now().Unix()), + } +} diff --git a/swarm/storage/mru/update.go b/swarm/storage/mru/update.go new file mode 100644 index 000000000000..d1bd37ddff02 --- /dev/null +++ b/swarm/storage/mru/update.go @@ -0,0 +1,148 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package mru + +import ( + "encoding/binary" + "errors" + + "github.com/ethereum/go-ethereum/swarm/chunk" + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/multihash" +) + +// resourceUpdate encapsulates the information sent as part of a resource update +type resourceUpdate struct { + updateHeader // metainformationa about this resource update + data []byte // actual data payload +} + +// Update chunk layout +// Prefix: +// 2 bytes updateHeaderLength +// 2 bytes data length +const chunkPrefixLength = 2 + 2 + +// Header: (see updateHeader) +// Data: +// data (datalength bytes) +// +// Minimum size is Header + 1 (minimum data length, enforced) +const minimumUpdateDataLength = updateHeaderLength + 1 +const maxUpdateDataLength = chunk.DefaultSize - signatureLength - updateHeaderLength - chunkPrefixLength + +// binaryPut serializes the resource update information into the given slice +func (r *resourceUpdate) binaryPut(serializedData []byte) error { + datalength := len(r.data) + if datalength == 0 { + return NewError(ErrInvalidValue, "cannot update a resource with no data") + } + + if datalength > maxUpdateDataLength { + return NewErrorf(ErrInvalidValue, "data is too big (length=%d). Max length=%d", datalength, maxUpdateDataLength) + } + + if len(serializedData) != r.binaryLength() { + return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. Expected %d bytes", r.binaryLength()) + } + + if r.multihash { + if _, _, err := multihash.GetMultihashLength(r.data); err != nil { + return NewError(ErrInvalidValue, "Invalid multihash") + } + } + + // Add prefix: updateHeaderLength and actual data length + cursor := 0 + binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(updateHeaderLength)) + cursor += 2 + + // data length + binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(datalength)) + cursor += 2 + + // serialize header (see updateHeader) + if err := r.updateHeader.binaryPut(serializedData[cursor : cursor+updateHeaderLength]); err != nil { + return err + } + cursor += updateHeaderLength + + // add the data + copy(serializedData[cursor:], r.data) + cursor += datalength + + return nil +} + +// binaryLength returns the expected number of bytes this structure will take to encode +func (r *resourceUpdate) binaryLength() int { + return chunkPrefixLength + updateHeaderLength + len(r.data) +} + +// binaryGet populates this instance from the information contained in the passed byte slice +func (r *resourceUpdate) binaryGet(serializedData []byte) error { + if len(serializedData) < minimumUpdateDataLength { + return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength) + } + cursor := 0 + declaredHeaderlength := binary.LittleEndian.Uint16(serializedData[cursor : cursor+2]) + if declaredHeaderlength != updateHeaderLength { + return NewErrorf(ErrCorruptData, "Invalid header length. 
Expected %d, got %d", updateHeaderLength, declaredHeaderlength) + } + + cursor += 2 + datalength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])) + cursor += 2 + + if chunkPrefixLength+updateHeaderLength+datalength+signatureLength != len(serializedData) { + return NewError(ErrNothingToReturn, "length specified in header is different than actual chunk size") + } + + // at this point we can be satisfied that we have the correct data length to read + if err := r.updateHeader.binaryGet(serializedData[cursor : cursor+updateHeaderLength]); err != nil { + return err + } + cursor += updateHeaderLength + + data := serializedData[cursor : cursor+datalength] + cursor += datalength + + // if multihash content is indicated we check the validity of the multihash + if r.updateHeader.multihash { + mhLength, mhHeaderLength, err := multihash.GetMultihashLength(data) + if err != nil { + log.Error("multihash parse error", "err", err) + return err + } + if datalength != mhLength+mhHeaderLength { + log.Debug("multihash error", "datalength", datalength, "mhLength", mhLength, "mhHeaderLength", mhHeaderLength) + return errors.New("Corrupt multihash data") + } + } + + // now that all checks have passed, copy data into structure + r.data = make([]byte, datalength) + copy(r.data, data) + + return nil + +} + +// Multihash specifies whether the resource data should be interpreted as multihash +func (r *resourceUpdate) Multihash() bool { + return r.multihash +} diff --git a/swarm/storage/mru/update_test.go b/swarm/storage/mru/update_test.go new file mode 100644 index 000000000000..51e9d2fcc5ac --- /dev/null +++ b/swarm/storage/mru/update_test.go @@ -0,0 +1,72 @@ +package mru + +import ( + "bytes" + "testing" +) + +const serializedUpdateHex = "0x490034004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf000456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f" +const serializedUpdateMultihashHex = "0x490022004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0011b200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1c1e1f20" + +func getTestResourceUpdate() *resourceUpdate { + return &resourceUpdate{ + updateHeader: *getTestUpdateHeader(false), + data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), + } +} + +func getTestResourceUpdateMultihash() *resourceUpdate { + return &resourceUpdate{ + updateHeader: *getTestUpdateHeader(true), + data: []byte{0x1b, 0x20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 28, 30, 31, 32}, + } +} + +func compareResourceUpdate(a, b *resourceUpdate) bool { + return compareUpdateHeader(&a.updateHeader, &b.updateHeader) && + bytes.Equal(a.data, b.data) +} + +func TestResourceUpdateSerializer(t *testing.T) { + var serializedUpdateLength = len(serializedUpdateHex)/2 - 1 // hack to calculate the byte length out of the hex representation + update := getTestResourceUpdate() + serializedUpdate := make([]byte, serializedUpdateLength) + if err := update.binaryPut(serializedUpdate); err != nil { + t.Fatal(err) + } + compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateHex) + + // Test fail if update does not contain data + update.data = nil + if err := update.binaryPut(serializedUpdate); err == nil { + t.Fatal("Expected 
resourceUpdate.binaryPut to fail since update does not contain data") + } + + // Test fail if update is too big + update.data = make([]byte, 10000) + if err := update.binaryPut(serializedUpdate); err == nil { + t.Fatal("Expected resourceUpdate.binaryPut to fail since update is too big") + } + + // Test fail if passed slice is not of the exact size required for this update + update.data = make([]byte, 1) + if err := update.binaryPut(serializedUpdate); err == nil { + t.Fatal("Expected resourceUpdate.binaryPut to fail since passed slice is not of the appropriate size") + } + + // Test serializing a multihash update + var serializedUpdateMultihashLength = len(serializedUpdateMultihashHex)/2 - 1 // hack to calculate the byte length out of the hex representation + update = getTestResourceUpdateMultihash() + serializedUpdate = make([]byte, serializedUpdateMultihashLength) + if err := update.binaryPut(serializedUpdate); err != nil { + t.Fatal(err) + } + compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateMultihashHex) + + // mess with the multihash to test it fails with a wrong multihash error + update.data[1] = 79 + if err := update.binaryPut(serializedUpdate); err == nil { + t.Fatal("Expected resourceUpdate.binaryPut to fail since data contains an invalid multihash") + } + +} diff --git a/swarm/storage/mru/updateheader.go b/swarm/storage/mru/updateheader.go new file mode 100644 index 000000000000..3ac20c1890ac --- /dev/null +++ b/swarm/storage/mru/updateheader.go @@ -0,0 +1,88 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package mru + +import ( + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// updateHeader models the non-payload components of a Resource Update +type updateHeader struct { + UpdateLookup // UpdateLookup contains the information required to locate this resource (components of the search key used to find it) + multihash bool // Whether the data in this Resource Update should be interpreted as multihash + metaHash []byte // SHA3 hash of the metadata chunk (less ownerAddr). Used to prove ownerhsip of the resource. +} + +const metaHashLength = storage.KeyLength + +// updateLookupLength bytes +// 1 byte flags (multihash bool for now) +// 32 bytes metaHash +const updateHeaderLength = updateLookupLength + 1 + metaHashLength + +// binaryPut serializes the resource header information into the given slice +func (h *updateHeader) binaryPut(serializedData []byte) error { + if len(serializedData) != updateHeaderLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize updateHeaderLength. 
Expected %d, got %d", updateHeaderLength, len(serializedData)) + } + if len(h.metaHash) != metaHashLength { + return NewError(ErrInvalidValue, "updateHeader.binaryPut called without metaHash set") + } + if err := h.UpdateLookup.binaryPut(serializedData[:updateLookupLength]); err != nil { + return err + } + cursor := updateLookupLength + copy(serializedData[cursor:], h.metaHash[:metaHashLength]) + cursor += metaHashLength + + var flags byte + if h.multihash { + flags |= 0x01 + } + + serializedData[cursor] = flags + cursor++ + + return nil +} + +// binaryLength returns the expected size of this structure when serialized +func (h *updateHeader) binaryLength() int { + return updateHeaderLength +} + +// binaryGet restores the current updateHeader instance from the information contained in the passed slice +func (h *updateHeader) binaryGet(serializedData []byte) error { + if len(serializedData) != updateHeaderLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData)) + } + + if err := h.UpdateLookup.binaryGet(serializedData[:updateLookupLength]); err != nil { + return err + } + cursor := updateLookupLength + h.metaHash = make([]byte, metaHashLength) + copy(h.metaHash[:storage.KeyLength], serializedData[cursor:cursor+storage.KeyLength]) + cursor += metaHashLength + + flags := serializedData[cursor] + cursor++ + + h.multihash = flags&0x01 != 0 + + return nil +} diff --git a/swarm/storage/mru/updateheader_test.go b/swarm/storage/mru/updateheader_test.go new file mode 100644 index 000000000000..b1f505989256 --- /dev/null +++ b/swarm/storage/mru/updateheader_test.go @@ -0,0 +1,64 @@ +package mru + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +const serializedUpdateHeaderMultihashHex = "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf001" + +func getTestUpdateHeader(multihash bool) (header *updateHeader) { + _, metaHash, _, _ := getTestMetadata().serializeAndHash() + return &updateHeader{ + UpdateLookup: *getTestUpdateLookup(), + multihash: multihash, + metaHash: metaHash, + } +} + +func compareUpdateHeader(a, b *updateHeader) bool { + return compareUpdateLookup(&a.UpdateLookup, &b.UpdateLookup) && + a.multihash == b.multihash && + bytes.Equal(a.metaHash, b.metaHash) +} + +func TestUpdateHeaderSerializer(t *testing.T) { + header := getTestUpdateHeader(true) + serializedHeader := make([]byte, updateHeaderLength) + if err := header.binaryPut(serializedHeader); err != nil { + t.Fatal(err) + } + compareByteSliceToExpectedHex(t, "serializedHeader", serializedHeader, serializedUpdateHeaderMultihashHex) + + // trigger incorrect slice length error passing a slice that is 1 byte too big + if err := header.binaryPut(make([]byte, updateHeaderLength+1)); err == nil { + t.Fatal("Expected updateHeader.binaryPut to fail since supplied slice is of incorrect length") + } + + // trigger invalid metaHash error + header.metaHash = nil + if err := header.binaryPut(serializedHeader); err == nil { + t.Fatal("Expected updateHeader.binaryPut to fail metaHash is of incorrect length") + } +} + +func TestUpdateHeaderDeserializer(t *testing.T) { + originalUpdate := getTestUpdateHeader(true) + serializedData, _ := hexutil.Decode(serializedUpdateHeaderMultihashHex) + var retrievedUpdate updateHeader + if err := retrievedUpdate.binaryGet(serializedData); err != nil { + t.Fatal(err) + } + 
if !compareUpdateHeader(originalUpdate, &retrievedUpdate) { + t.Fatalf("Expected deserialized structure to equal the original") + } + + // mess with source slice to test length checks + serializedData = []byte{1, 2, 3} + if err := retrievedUpdate.binaryGet(serializedData); err == nil { + t.Fatal("Expected retrievedUpdate.binaryGet, since passed slice is too small") + } + +} diff --git a/swarm/storage/netstore.go b/swarm/storage/netstore.go index 6a205cfa44d4..96a7e51f7747 100644 --- a/swarm/storage/netstore.go +++ b/swarm/storage/netstore.go @@ -17,9 +17,12 @@ package storage import ( + "context" "time" "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/spancontext" + opentracing "github.com/opentracing/opentracing-go" ) var ( @@ -43,10 +46,10 @@ var ( // access by calling network is blocking with a timeout type NetStore struct { localStore *LocalStore - retrieve func(chunk *Chunk) error + retrieve func(ctx context.Context, chunk *Chunk) error } -func NewNetStore(localStore *LocalStore, retrieve func(chunk *Chunk) error) *NetStore { +func NewNetStore(localStore *LocalStore, retrieve func(ctx context.Context, chunk *Chunk) error) *NetStore { return &NetStore{localStore, retrieve} } @@ -56,7 +59,14 @@ func NewNetStore(localStore *LocalStore, retrieve func(chunk *Chunk) error) *Net // Get uses get method to retrieve request, but retries if the // ErrChunkNotFound is returned by get, until the netStoreRetryTimeout // is reached. -func (ns *NetStore) Get(addr Address) (chunk *Chunk, err error) { +func (ns *NetStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) { + + var sp opentracing.Span + ctx, sp = spancontext.StartSpan( + ctx, + "netstore.get.global") + defer sp.Finish() + timer := time.NewTimer(netStoreRetryTimeout) defer timer.Stop() @@ -84,7 +94,7 @@ func (ns *NetStore) Get(addr Address) (chunk *Chunk, err error) { defer limiter.Stop() for { - chunk, err := ns.get(addr, 0) + chunk, err := ns.get(ctx, addr, 0) if err != ErrChunkNotFound { // break retry only if the error is nil // or other error then ErrChunkNotFound @@ -122,16 +132,23 @@ func (ns *NetStore) Get(addr Address) (chunk *Chunk, err error) { } // GetWithTimeout makes a single retrieval attempt for a chunk with a explicit timeout parameter -func (ns *NetStore) GetWithTimeout(addr Address, timeout time.Duration) (chunk *Chunk, err error) { - return ns.get(addr, timeout) +func (ns *NetStore) GetWithTimeout(ctx context.Context, addr Address, timeout time.Duration) (chunk *Chunk, err error) { + return ns.get(ctx, addr, timeout) } -func (ns *NetStore) get(addr Address, timeout time.Duration) (chunk *Chunk, err error) { +func (ns *NetStore) get(ctx context.Context, addr Address, timeout time.Duration) (chunk *Chunk, err error) { if timeout == 0 { timeout = searchTimeout } + + var sp opentracing.Span + ctx, sp = spancontext.StartSpan( + ctx, + "netstore.get") + defer sp.Finish() + if ns.retrieve == nil { - chunk, err = ns.localStore.Get(addr) + chunk, err = ns.localStore.Get(ctx, addr) if err == nil { return chunk, nil } @@ -140,14 +157,14 @@ func (ns *NetStore) get(addr Address, timeout time.Duration) (chunk *Chunk, err } } else { var created bool - chunk, created = ns.localStore.GetOrCreateRequest(addr) + chunk, created = ns.localStore.GetOrCreateRequest(ctx, addr) if chunk.ReqC == nil { return chunk, nil } if created { - err := ns.retrieve(chunk) + err := ns.retrieve(ctx, chunk) if err != nil { // mark chunk request as failed so that we can retry it later 
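The NetStore changes above thread a context.Context through Get, GetWithTimeout and get, and open a tracing span per call. A hedged sketch of that pattern using the stock opentracing-go helper (upstream import path shown) instead of swarm's internal spancontext package; the function and operation names are illustrative:

    import (
        "context"

        opentracing "github.com/opentracing/opentracing-go"
    )

    func tracedGet(ctx context.Context, fetch func(context.Context) error) error {
        // Start a child span of whatever span ctx already carries (or a new root span).
        sp, ctx := opentracing.StartSpanFromContext(ctx, "netstore.get")
        defer sp.Finish()
        // Hand the derived ctx on so downstream calls join the same trace.
        return fetch(ctx)
    }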
chunk.SetErrored(ErrChunkUnavailable) @@ -171,8 +188,8 @@ func (ns *NetStore) get(addr Address, timeout time.Duration) (chunk *Chunk, err } // Put is the entrypoint for local store requests coming from storeLoop -func (ns *NetStore) Put(chunk *Chunk) { - ns.localStore.Put(chunk) +func (ns *NetStore) Put(ctx context.Context, chunk *Chunk) { + ns.localStore.Put(ctx, chunk) } // Close chunk store diff --git a/swarm/storage/netstore_test.go b/swarm/storage/netstore_test.go index 432a799d8647..7babbf5e0cc3 100644 --- a/swarm/storage/netstore_test.go +++ b/swarm/storage/netstore_test.go @@ -17,6 +17,7 @@ package storage import ( + "context" "encoding/hex" "errors" "io/ioutil" @@ -46,7 +47,7 @@ func newDummyChunk(addr Address) *Chunk { return chunk } -func (m *mockRetrieve) retrieve(chunk *Chunk) error { +func (m *mockRetrieve) retrieve(ctx context.Context, chunk *Chunk) error { hkey := hex.EncodeToString(chunk.Addr) m.requests[hkey] += 1 @@ -100,7 +101,7 @@ func TestNetstoreFailedRequest(t *testing.T) { // } // second call - _, err = netStore.Get(key) + _, err = netStore.Get(context.TODO(), key) if got := r.requests[hex.EncodeToString(key)]; got != 2 { t.Fatalf("expected to have called retrieve two times, but got: %v", got) } @@ -109,7 +110,7 @@ func TestNetstoreFailedRequest(t *testing.T) { } // third call - chunk, err := netStore.Get(key) + chunk, err := netStore.Get(context.TODO(), key) if got := r.requests[hex.EncodeToString(key)]; got != 3 { t.Fatalf("expected to have called retrieve three times, but got: %v", got) } diff --git a/swarm/storage/pyramid.go b/swarm/storage/pyramid.go index 01172cb77a47..36ff66d045d5 100644 --- a/swarm/storage/pyramid.go +++ b/swarm/storage/pyramid.go @@ -17,6 +17,7 @@ package storage import ( + "context" "encoding/binary" "errors" "io" @@ -24,6 +25,7 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" ) @@ -99,12 +101,12 @@ func NewPyramidSplitterParams(addr Address, reader io.Reader, putter Putter, get When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Key), the root hash of the entire content will fill this once processing finishes. New chunks to store are store using the putter which the caller provides. 
*/ -func PyramidSplit(reader io.Reader, putter Putter, getter Getter) (Address, func(), error) { - return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, DefaultChunkSize)).Split() +func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) { + return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, chunk.DefaultSize)).Split(ctx) } -func PyramidAppend(addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(), error) { - return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, DefaultChunkSize)).Append() +func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) { + return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, chunk.DefaultSize)).Append(ctx) } // Entry to create a tree node @@ -203,7 +205,7 @@ func (pc *PyramidChunker) decrementWorkerCount() { pc.workerCount -= 1 } -func (pc *PyramidChunker) Split() (k Address, wait func(), err error) { +func (pc *PyramidChunker) Split(ctx context.Context) (k Address, wait func(context.Context) error, err error) { log.Debug("pyramid.chunker: Split()") pc.wg.Add(1) @@ -235,7 +237,7 @@ func (pc *PyramidChunker) Split() (k Address, wait func(), err error) { } -func (pc *PyramidChunker) Append() (k Address, wait func(), err error) { +func (pc *PyramidChunker) Append(ctx context.Context) (k Address, wait func(context.Context) error, err error) { log.Debug("pyramid.chunker: Append()") // Load the right most unfinished tree chunks in every level pc.loadTree() @@ -286,7 +288,7 @@ func (pc *PyramidChunker) processor(id int64) { func (pc *PyramidChunker) processChunk(id int64, job *chunkJob) { log.Debug("pyramid.chunker: processChunk()", "id", id) - ref, err := pc.putter.Put(job.chunk) + ref, err := pc.putter.Put(context.TODO(), job.chunk) if err != nil { pc.errC <- err } @@ -301,7 +303,7 @@ func (pc *PyramidChunker) processChunk(id int64, job *chunkJob) { func (pc *PyramidChunker) loadTree() error { log.Debug("pyramid.chunker: loadTree()") // Get the root chunk to get the total size - chunkData, err := pc.getter.Get(Reference(pc.key)) + chunkData, err := pc.getter.Get(context.TODO(), Reference(pc.key)) if err != nil { return errLoadingTreeRootChunk } @@ -354,7 +356,7 @@ func (pc *PyramidChunker) loadTree() error { branchCount = int64(len(ent.chunk)-8) / pc.hashSize for i := int64(0); i < branchCount; i++ { key := ent.chunk[8+(i*pc.hashSize) : 8+((i+1)*pc.hashSize)] - newChunkData, err := pc.getter.Get(Reference(key)) + newChunkData, err := pc.getter.Get(context.TODO(), Reference(key)) if err != nil { return errLoadingTreeChunk } @@ -416,7 +418,7 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) { lastKey := parent.chunk[8+lastBranch*pc.hashSize : 8+(lastBranch+1)*pc.hashSize] var err error - unfinishedChunkData, err = pc.getter.Get(lastKey) + unfinishedChunkData, err = pc.getter.Get(context.TODO(), lastKey) if err != nil { pc.errC <- err } diff --git a/swarm/storage/types.go b/swarm/storage/types.go index b75f64205ff1..53e3af485a70 100644 --- a/swarm/storage/types.go +++ b/swarm/storage/types.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "crypto" "crypto/rand" "encoding/binary" @@ -29,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/sha3" 
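Caller-side sketch of the new chunker signatures: PyramidSplit and PyramidAppend now take a context and return a wait func(context.Context) error that blocks until all chunks have been stored. ctx, reader, putter and getter below are placeholders for whatever the caller already holds:

    addr, wait, err := storage.PyramidSplit(ctx, reader, putter, getter)
    if err != nil {
        return err
    }
    if wait != nil {
        if err := wait(ctx); err != nil { // flush outstanding chunk writes before using addr
            return err
        }
    }
    _ = addr // root reference of the stored content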
"github.com/ethereum/go-ethereum/swarm/bmt" + "github.com/ethereum/go-ethereum/swarm/chunk" ) const MaxPO = 16 @@ -113,7 +115,9 @@ func MakeHashFunc(hash string) SwarmHasher { case "BMT": return func() SwarmHash { hasher := sha3.NewKeccak256 - pool := bmt.NewTreePool(hasher, bmt.SegmentCount, bmt.PoolSize) + hasherSize := hasher().Size() + segmentCount := chunk.DefaultSize / hasherSize + pool := bmt.NewTreePool(hasher, segmentCount, bmt.PoolSize) return bmt.New(pool) } } @@ -229,8 +233,8 @@ func GenerateRandomChunk(dataSize int64) *Chunk { func GenerateRandomChunks(dataSize int64, count int) (chunks []*Chunk) { var i int hasher := MakeHashFunc(DefaultHash)() - if dataSize > DefaultChunkSize { - dataSize = DefaultChunkSize + if dataSize > chunk.DefaultSize { + dataSize = chunk.DefaultSize } for i = 0; i < count; i++ { @@ -249,7 +253,8 @@ func GenerateRandomChunks(dataSize int64, count int) (chunks []*Chunk) { // Size, Seek, Read, ReadAt type LazySectionReader interface { - Size(chan bool) (int64, error) + Context() context.Context + Size(context.Context, chan bool) (int64, error) io.Seeker io.Reader io.ReaderAt @@ -259,10 +264,14 @@ type LazyTestSectionReader struct { *io.SectionReader } -func (r *LazyTestSectionReader) Size(chan bool) (int64, error) { +func (r *LazyTestSectionReader) Size(context.Context, chan bool) (int64, error) { return r.SectionReader.Size(), nil } +func (r *LazyTestSectionReader) Context() context.Context { + return context.TODO() +} + type StoreParams struct { Hash SwarmHasher `toml:"-"` DbCapacity uint64 @@ -297,18 +306,18 @@ type Reference []byte // Putter is responsible to store data and create a reference for it type Putter interface { - Put(ChunkData) (Reference, error) + Put(context.Context, ChunkData) (Reference, error) // RefSize returns the length of the Reference created by this Putter RefSize() int64 // Close is to indicate that no more chunk data will be Put on this Putter Close() // Wait returns if all data has been store and the Close() was called. 
- Wait() + Wait(context.Context) error } // Getter is an interface to retrieve a chunk's data by its reference type Getter interface { - Get(Reference) (ChunkData, error) + Get(context.Context, Reference) (ChunkData, error) } // NOTE: this returns invalid data if chunk is encrypted @@ -339,6 +348,10 @@ func NewContentAddressValidator(hasher SwarmHasher) *ContentAddressValidator { // Validate that the given key is a valid content address for the given data func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool { + if l := len(data); l < 9 || l > chunk.DefaultSize+8 { + return false + } + hasher := v.Hasher() hasher.ResetWithLength(data[:8]) hasher.Write(data[8:]) diff --git a/swarm/swarm.go b/swarm/swarm.go index 90360264eed1..736cd37de86b 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -21,6 +21,7 @@ import ( "context" "crypto/ecdsa" "fmt" + "io" "math/big" "net" "path/filepath" @@ -50,6 +51,7 @@ import ( "github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage/mock" "github.com/ethereum/go-ethereum/swarm/storage/mru" + "github.com/ethereum/go-ethereum/swarm/tracing" ) var ( @@ -76,19 +78,19 @@ type Swarm struct { lstore *storage.LocalStore // local store, needs to store for releasing resources after node stopped sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit ps *pss.Pss + + tracerClose io.Closer } type SwarmAPI struct { Api *api.API Backend chequebook.Backend - PrvKey *ecdsa.PrivateKey } func (self *Swarm) API() *SwarmAPI { return &SwarmAPI{ Api: self.api, Backend: self.backend, - PrvKey: self.privateKey, } } @@ -119,12 +121,10 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e backend: backend, privateKey: config.ShiftPrivateKey(), } - log.Debug(fmt.Sprintf("Setting up Swarm service components")) + log.Debug("Setting up Swarm service components") config.HiveParams.Discovery = true - log.Debug(fmt.Sprintf("-> swarm net store shared access layer to Swarm Chunk Store")) - nodeID, err := discover.HexID(config.NodeID) if err != nil { return nil, err @@ -139,6 +139,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e OverlayAddr: addr.OAddr, UnderlayAddr: addr.UAddr, HiveParams: config.HiveParams, + LightNode: config.LightNodeEnabled, } stateStore, err := state.NewDBStore(filepath.Join(config.Path, "state-store.db")) @@ -188,40 +189,17 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e self.fileStore = storage.NewFileStore(netStore, self.config.FileStoreParams) var resourceHandler *mru.Handler - rhparams := &mru.HandlerParams{ - // TODO: config parameter to set limits - QueryMaxPeriods: &mru.LookupParams{ - Limit: false, - }, - Signer: &mru.GenericSigner{ - PrivKey: self.privateKey, - }, - } - if resolver != nil { - resolver.SetNameHash(ens.EnsNode) - // Set HeaderGetter and OwnerValidator interfaces to resolver only if it is not nil. 
- rhparams.HeaderGetter = resolver - rhparams.OwnerValidator = resolver - } else { - log.Warn("No ETH API specified, resource updates will use block height approximation") - // TODO: blockestimator should use saved values derived from last time ethclient was connected - rhparams.HeaderGetter = mru.NewBlockEstimator() - } - resourceHandler, err = mru.NewHandler(rhparams) - if err != nil { - return nil, err - } + rhparams := &mru.HandlerParams{} + + resourceHandler = mru.NewHandler(rhparams) resourceHandler.SetStore(netStore) - var validators []storage.ChunkValidator - validators = append(validators, storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash))) - if resourceHandler != nil { - validators = append(validators, resourceHandler) + self.lstore.Validators = []storage.ChunkValidator{ + storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)), + resourceHandler, } - self.lstore.Validators = validators - // setup local store - log.Debug(fmt.Sprintf("Set up local storage")) + log.Debug("Setup local storage") self.bzz = network.NewBzz(bzzconfig, to, stateStore, stream.Spec, self.streamer.Run) @@ -234,12 +212,10 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e pss.SetHandshakeController(self.ps, pss.NewHandshakeParams()) } - self.api = api.NewAPI(self.fileStore, self.dns, resourceHandler) - // Manifests for Smart Hosting - log.Debug(fmt.Sprintf("-> Web3 virtual server API")) + self.api = api.NewAPI(self.fileStore, self.dns, resourceHandler, self.privateKey) self.sfs = fuse.NewSwarmFS(self.api) - log.Debug("-> Initializing Fuse file system") + log.Debug("Initialized FUSE filesystem") return self, nil } @@ -356,9 +332,11 @@ Start is called when the stack is started func (self *Swarm) Start(srv *p2p.Server) error { startTime = time.Now() + self.tracerClose = tracing.Closer + // update uaddr to correct enode newaddr := self.bzz.UpdateLocalAddr([]byte(srv.Self().String())) - log.Warn("Updated bzz local addr", "oaddr", fmt.Sprintf("%x", newaddr.OAddr), "uaddr", fmt.Sprintf("%s", newaddr.UAddr)) + log.Info("Updated bzz local addr", "oaddr", fmt.Sprintf("%x", newaddr.OAddr), "uaddr", fmt.Sprintf("%s", newaddr.UAddr)) // set chequebook if self.config.SwapEnabled { ctx := context.Background() // The initial setup has no deadline. 
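With the changes above, NewSwarm registers the content-address validator and the resource handler directly as the local store's validators, and ContentAddressValidator.Validate now also rejects chunks whose serialized size is out of range (an 8-byte length prefix plus at least 1 and at most chunk.DefaultSize bytes of payload, i.e. 9 to chunk.DefaultSize+8 bytes). A hedged sketch of the accept-if-any-validator-accepts dispatch this enables, with hypothetical names:

    type chunkValidator interface {
        Validate(addr, data []byte) bool
    }

    // valid accepts a chunk as soon as one registered validator recognises it.
    func valid(addr, data []byte, validators []chunkValidator) bool {
        for _, v := range validators {
            if v.Validate(addr, data) {
                return true
            }
        }
        return false
    }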
@@ -371,33 +349,35 @@ func (self *Swarm) Start(srv *p2p.Server) error { log.Debug(fmt.Sprintf("SWAP disabled: no cheque book set")) } - log.Warn(fmt.Sprintf("Starting Swarm service")) + log.Info("Starting bzz service") err := self.bzz.Start(srv) if err != nil { log.Error("bzz failed", "err", err) return err } - log.Info(fmt.Sprintf("Swarm network started on bzz address: %x", self.bzz.Hive.Overlay.BaseAddr())) + log.Info("Swarm network started", "bzzaddr", fmt.Sprintf("%x", self.bzz.Hive.Overlay.BaseAddr())) if self.ps != nil { self.ps.Start(srv) - log.Info("Pss started") } // start swarm http proxy server if self.config.Port != "" { addr := net.JoinHostPort(self.config.ListenAddr, self.config.Port) - go httpapi.StartHTTPServer(self.api, &httpapi.ServerConfig{ - Addr: addr, - CorsString: self.config.Cors, - }) - } + server := httpapi.NewServer(self.api, self.config.Cors) - log.Debug(fmt.Sprintf("Swarm http proxy started on port: %v", self.config.Port)) + if self.config.Cors != "" { + log.Debug("Swarm HTTP proxy CORS headers", "allowedOrigins", self.config.Cors) + } - if self.config.Cors != "" { - log.Debug(fmt.Sprintf("Swarm http proxy started with corsdomain: %v", self.config.Cors)) + log.Debug("Starting Swarm HTTP proxy", "port", self.config.Port) + go func() { + err := server.ListenAndServe(addr) + if err != nil { + log.Error("Could not start Swarm HTTP proxy", "err", err.Error()) + } + }() } self.periodicallyUpdateGauges() @@ -425,6 +405,13 @@ func (self *Swarm) updateGauges() { // implements the node.Service interface // stops all component services. func (self *Swarm) Stop() error { + if self.tracerClose != nil { + err := self.tracerClose.Close() + if err != nil { + return err + } + } + if self.ps != nil { self.ps.Stop() } diff --git a/swarm/swarm_test.go b/swarm/swarm_test.go index f82a9c6fac04..0827748ae200 100644 --- a/swarm/swarm_test.go +++ b/swarm/swarm_test.go @@ -17,10 +17,13 @@ package swarm import ( + "context" + "encoding/hex" "io/ioutil" "math/rand" "os" "path" + "runtime" "strings" "testing" "time" @@ -42,6 +45,13 @@ func TestNewSwarm(t *testing.T) { // a simple rpc endpoint for testing dialing ipcEndpoint := path.Join(dir, "TestSwarm.ipc") + // windows namedpipes are not on filesystem but on NPFS + if runtime.GOOS == "windows" { + b := make([]byte, 8) + rand.Read(b) + ipcEndpoint = `\\.\pipe\TestSwarm-` + hex.EncodeToString(b) + } + _, server, err := rpc.StartIPCEndpoint(ipcEndpoint, nil) if err != nil { t.Error(err) @@ -338,15 +348,19 @@ func testLocalStoreAndRetrieve(t *testing.T, swarm *Swarm, n int, randomData boo } dataPut := string(slice) - k, wait, err := swarm.api.Store(strings.NewReader(dataPut), int64(len(dataPut)), false) + ctx := context.TODO() + k, wait, err := swarm.api.Store(ctx, strings.NewReader(dataPut), int64(len(dataPut)), false) if err != nil { t.Fatal(err) } if wait != nil { - wait() + err = wait(ctx) + if err != nil { + t.Fatal(err) + } } - r, _ := swarm.api.Retrieve(k) + r, _ := swarm.api.Retrieve(context.TODO(), k) d, err := ioutil.ReadAll(r) if err != nil { diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go index debf0b14b500..7fd60fcc3d2b 100644 --- a/swarm/testutil/http.go +++ b/swarm/testutil/http.go @@ -17,15 +17,12 @@ package testutil import ( - "context" "io/ioutil" - "math/big" "net/http" "net/http/httptest" "os" "testing" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/storage" 
"github.com/ethereum/go-ethereum/swarm/storage/mru" @@ -35,16 +32,17 @@ type TestServer interface { ServeHTTP(http.ResponseWriter, *http.Request) } -type fakeBackend struct { - blocknumber int64 +// simulated timeProvider +type fakeTimeProvider struct { + currentTime uint64 } -func (f *fakeBackend) HeaderByNumber(context context.Context, _ string, bigblock *big.Int) (*types.Header, error) { - f.blocknumber++ - biggie := big.NewInt(f.blocknumber) - return &types.Header{ - Number: biggie, - }, nil +func (f *fakeTimeProvider) Tick() { + f.currentTime++ +} + +func (f *fakeTimeProvider) Now() mru.Timestamp { + return mru.Timestamp{Time: f.currentTime} } func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *TestSwarmServer { @@ -68,24 +66,25 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *Tes if err != nil { t.Fatal(err) } - rhparams := &mru.HandlerParams{ - QueryMaxPeriods: &mru.LookupParams{}, - HeaderGetter: &fakeBackend{ - blocknumber: 42, - }, + + fakeTimeProvider := &fakeTimeProvider{ + currentTime: 42, } + mru.TimestampProvider = fakeTimeProvider + rhparams := &mru.HandlerParams{} rh, err := mru.NewTestHandler(resourceDir, rhparams) if err != nil { t.Fatal(err) } - a := api.NewAPI(fileStore, nil, rh) + a := api.NewAPI(fileStore, nil, rh.Handler, nil) srv := httptest.NewServer(serverFunc(a)) return &TestSwarmServer{ - Server: srv, - FileStore: fileStore, - dir: dir, - Hasher: storage.MakeHashFunc(storage.DefaultHash)(), + Server: srv, + FileStore: fileStore, + dir: dir, + Hasher: storage.MakeHashFunc(storage.DefaultHash)(), + timestampProvider: fakeTimeProvider, cleanup: func() { srv.Close() rh.Close() @@ -97,12 +96,17 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *Tes type TestSwarmServer struct { *httptest.Server - Hasher storage.SwarmHash - FileStore *storage.FileStore - dir string - cleanup func() + Hasher storage.SwarmHash + FileStore *storage.FileStore + dir string + cleanup func() + timestampProvider *fakeTimeProvider } func (t *TestSwarmServer) Close() { t.cleanup() } + +func (t *TestSwarmServer) GetCurrentTime() mru.Timestamp { + return t.timestampProvider.Now() +} diff --git a/swarm/tracing/tracing.go b/swarm/tracing/tracing.go new file mode 100644 index 000000000000..b84cfb3102d5 --- /dev/null +++ b/swarm/tracing/tracing.go @@ -0,0 +1,103 @@ +package tracing + +import ( + "io" + "os" + "strings" + "time" + + "github.com/ethereum/go-ethereum/log" + jaeger "github.com/uber/jaeger-client-go" + jaegercfg "github.com/uber/jaeger-client-go/config" + jaegerlog "github.com/uber/jaeger-client-go/log" + cli "gopkg.in/urfave/cli.v1" +) + +var Enabled bool = false + +// TracingEnabledFlag is the CLI flag name to use to enable trace collections. +const TracingEnabledFlag = "tracing" + +var ( + Closer io.Closer +) + +var ( + TracingFlag = cli.BoolFlag{ + Name: TracingEnabledFlag, + Usage: "Enable tracing", + } + TracingEndpointFlag = cli.StringFlag{ + Name: "tracing.endpoint", + Usage: "Tracing endpoint", + Value: "0.0.0.0:6831", + } + TracingSvcFlag = cli.StringFlag{ + Name: "tracing.svc", + Usage: "Tracing service name", + Value: "swarm", + } +) + +// Flags holds all command-line flags required for tracing collection. +var Flags = []cli.Flag{ + TracingFlag, + TracingEndpointFlag, + TracingSvcFlag, +} + +// Init enables or disables the open tracing system. 
+func init() { + for _, arg := range os.Args { + if flag := strings.TrimLeft(arg, "-"); flag == TracingEnabledFlag { + Enabled = true + } + } +} + +func Setup(ctx *cli.Context) { + if Enabled { + log.Info("Enabling opentracing") + var ( + endpoint = ctx.GlobalString(TracingEndpointFlag.Name) + svc = ctx.GlobalString(TracingSvcFlag.Name) + ) + + Closer = initTracer(endpoint, svc) + } +} + +func initTracer(endpoint, svc string) (closer io.Closer) { + // Sample configuration for testing. Use constant sampling to sample every trace + // and enable LogSpan to log every span via configured Logger. + cfg := jaegercfg.Configuration{ + Sampler: &jaegercfg.SamplerConfig{ + Type: jaeger.SamplerTypeConst, + Param: 1, + }, + Reporter: &jaegercfg.ReporterConfig{ + LogSpans: true, + BufferFlushInterval: 1 * time.Second, + LocalAgentHostPort: endpoint, + }, + } + + // Example logger and metrics factory. Use github.com/uber/jaeger-client-go/log + // and github.com/uber/jaeger-lib/metrics respectively to bind to real logging and metrics + // frameworks. + jLogger := jaegerlog.StdLogger + //jMetricsFactory := metrics.NullFactory + + // Initialize tracer with a logger and a metrics factory + closer, err := cfg.InitGlobalTracer( + svc, + jaegercfg.Logger(jLogger), + //jaegercfg.Metrics(jMetricsFactory), + //jaegercfg.Observer(rpcmetrics.NewObserver(jMetricsFactory, rpcmetrics.DefaultNameNormalizer)), + ) + if err != nil { + log.Error("Could not initialize Jaeger tracer", "err", err) + } + + return closer +} diff --git a/swarm/version/version.go b/swarm/version/version.go new file mode 100644 index 000000000000..1f0c646197df --- /dev/null +++ b/swarm/version/version.go @@ -0,0 +1,64 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package version + +import ( + "fmt" +) + +const ( + VersionMajor = 0 // Major version component of the current release + VersionMinor = 3 // Minor version component of the current release + VersionPatch = 2 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string +) + +// Version holds the textual version string. +var Version = func() string { + return fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch) +}() + +// VersionWithMeta holds the textual version string including the metadata. +var VersionWithMeta = func() string { + v := Version + if VersionMeta != "" { + v += "-" + VersionMeta + } + return v +}() + +// ArchiveVersion holds the textual version string used for Swarm archives. +// e.g. 
"0.3.0-dea1ce05" for stable releases, or +// "0.3.1-unstable-21c059b6" for unstable releases +func ArchiveVersion(gitCommit string) string { + vsn := Version + if VersionMeta != "stable" { + vsn += "-" + VersionMeta + } + if len(gitCommit) >= 8 { + vsn += "-" + gitCommit[:8] + } + return vsn +} + +func VersionWithCommit(gitCommit string) string { + vsn := Version + if len(gitCommit) >= 8 { + vsn += "-" + gitCommit[:8] + } + return vsn +} diff --git a/tests/init.go b/tests/init.go index af65f92069e4..f0a4943c1356 100644 --- a/tests/init.go +++ b/tests/init.go @@ -53,6 +53,16 @@ var Forks = map[string]*params.ChainConfig{ DAOForkBlock: big.NewInt(0), ByzantiumBlock: big.NewInt(0), }, + "Constantinople": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + DAOForkBlock: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + }, "FrontierToHomesteadAt5": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(5), diff --git a/trie/database.go b/trie/database.go index df7fc01eaba6..d0691b637e3a 100644 --- a/trie/database.go +++ b/trie/database.go @@ -51,7 +51,7 @@ const secureKeyLength = 11 + 32 // DatabaseReader wraps the Get and Has method of a backing store for the trie. type DatabaseReader interface { - // Get retrieves the value associated with key form the database. + // Get retrieves the value associated with key from the database. Get(key []byte) (value []byte, err error) // Has retrieves whether a key is present in the database. @@ -431,6 +431,11 @@ func (db *Database) reference(child common.Hash, parent common.Hash) { // Dereference removes an existing reference from a root node. func (db *Database) Dereference(root common.Hash) { + // Sanity check to ensure that the meta-root is not removed + if root == (common.Hash{}) { + log.Error("Attempted to dereference the trie cache meta root") + return + } db.lock.Lock() defer db.lock.Unlock() @@ -475,9 +480,14 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) { } if node.parents == 0 { // Remove the node from the flush-list - if child == db.oldest { + switch child { + case db.oldest: db.oldest = node.flushNext - } else { + db.nodes[node.flushNext].flushPrev = common.Hash{} + case db.newest: + db.newest = node.flushPrev + db.nodes[node.flushPrev].flushNext = common.Hash{} + default: db.nodes[node.flushPrev].flushNext = node.flushNext db.nodes[node.flushNext].flushPrev = node.flushPrev } @@ -697,9 +707,14 @@ func (db *Database) uncache(hash common.Hash) { return } // Node still exists, remove it from the flush-list - if hash == db.oldest { + switch hash { + case db.oldest: db.oldest = node.flushNext - } else { + db.nodes[node.flushNext].flushPrev = common.Hash{} + case db.newest: + db.newest = node.flushPrev + db.nodes[node.flushPrev].flushNext = common.Hash{} + default: db.nodes[node.flushPrev].flushNext = node.flushNext db.nodes[node.flushNext].flushPrev = node.flushPrev } diff --git a/trie/node.go b/trie/node.go index a06f1b3898f3..1fafb7a53825 100644 --- a/trie/node.go +++ b/trie/node.go @@ -55,7 +55,7 @@ var nilValueNode = valueNode(nil) func (n *fullNode) EncodeRLP(w io.Writer) error { var nodes [17]node - for i, child := range n.Children { + for i, child := range &n.Children { if child != nil { nodes[i] = child } else { @@ -98,7 +98,7 @@ func (n valueNode) String() string { return n.fstring("") } func (n *fullNode) fstring(ind string) string { resp := fmt.Sprintf("[\n%s ", ind) - for i, 
node := range n.Children { + for i, node := range &n.Children { if node == nil { resp += fmt.Sprintf("%s: ", indices[i]) } else { diff --git a/trie/trie.go b/trie/trie.go index 4284e30ad40e..e920ccd23f10 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -356,7 +356,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { // value that is left in n or -2 if n contains at least two // values. pos := -1 - for i, cld := range n.Children { + for i, cld := range &n.Children { if cld != nil { if pos == -1 { pos = i diff --git a/vendor/github.com/Azure/azure-storage-go/LICENSE b/vendor/github.com/Azure/azure-pipeline-go/LICENSE similarity index 98% rename from vendor/github.com/Azure/azure-storage-go/LICENSE rename to vendor/github.com/Azure/azure-pipeline-go/LICENSE index 21071075c245..d1ca00f20a89 100644 --- a/vendor/github.com/Azure/azure-storage-go/LICENSE +++ b/vendor/github.com/Azure/azure-pipeline-go/LICENSE @@ -18,4 +18,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go new file mode 100755 index 000000000000..0dde81d728c9 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go @@ -0,0 +1,255 @@ +package pipeline + +import ( + "context" + "net" + "net/http" + "os" + "time" +) + +// The Factory interface represents an object that can create its Policy object. Each HTTP request sent +// requires that this Factory create a new instance of its Policy object. +type Factory interface { + New(next Policy, po *PolicyOptions) Policy +} + +// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface. +type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc + +// New calls f(next,po). +func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy { + return f(next, po) +} + +// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process +// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned +// Response goes backward through the linked-list for additional processing. +// NOTE: Request is passed by value so changes do not change the caller's version of +// the request. However, Request has some fields that reference mutable objects (not strings). +// These references are copied; a deep copy is not performed. Specifically, this means that +// you should avoid modifying the objects referred to by these fields: URL, Header, Body, +// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response. +type Policy interface { + Do(ctx context.Context, request Request) (Response, error) +} + +// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface. +type PolicyFunc func(ctx context.Context, request Request) (Response, error) + +// Do calls f(ctx, request). +func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) { + return f(ctx, request) +} + +// Options configures a Pipeline's behavior. +type Options struct { + HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests. 
+ Log LogOptions +} + +// LogLevel tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LogLevel uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LogLevel = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +// LogOptions configures the pipeline's logging mechanism & level filtering. +type LogOptions struct { + Log func(level LogLevel, message string) + + // ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not. + // An application can return different values over the its lifetime; this allows the application to dynamically + // alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure + // you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone). + // Usually, the function will be implemented simply like this: return level <= LogWarning + ShouldLog func(level LogLevel) bool +} + +type pipeline struct { + factories []Factory + options Options +} + +// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface. +// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest +// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a +// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where +// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects. +// +// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list. +// THen, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network +// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects. +// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests. +type Pipeline interface { + Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) +} + +// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options. 
+func NewPipeline(factories []Factory, o Options) Pipeline { + if o.HTTPSender == nil { + o.HTTPSender = newDefaultHTTPClientFactory() + } + if o.Log.Log == nil { + o.Log.Log = func(LogLevel, string) {} // No-op logger + } + return &pipeline{factories: factories, options: o} +} + +// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object +// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request +// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and +// ultimately sends the transformed HTTP request over the network. +func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) { + response, err := p.newPolicies(methodFactory).Do(ctx, request) + request.close() + return response, err +} + +func (p *pipeline) newPolicies(methodFactory Factory) Policy { + // The last Policy is the one that actually sends the request over the wire and gets the response. + // It is overridable via the Options' HTTPSender field. + po := &PolicyOptions{pipeline: p} // One object shared by all policy objects + next := p.options.HTTPSender.New(nil, po) + + // Walk over the slice of Factory objects in reverse (from wire to API) + markers := 0 + for i := len(p.factories) - 1; i >= 0; i-- { + factory := p.factories[i] + if _, ok := factory.(methodFactoryMarker); ok { + markers++ + if markers > 1 { + panic("MethodFactoryMarker can only appear once in the pipeline") + } + if methodFactory != nil { + // Replace MethodFactoryMarker with passed-in methodFactory + next = methodFactory.New(next, po) + } + } else { + // Use the slice's Factory to construct its Policy + next = factory.New(next, po) + } + } + + // Each Factory has created its Policy + if markers == 0 && methodFactory != nil { + panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline") + } + return next // Return head of the Policy object linked-list +} + +// A PolicyOptions represents optional information that can be used by a node in the +// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method +// which passes it (if desired) to the Policy object it creates. Today, the Policy object +// uses the options to perform logging. But, in the future, this could be used for more. +type PolicyOptions struct { + pipeline *pipeline +} + +// ShouldLog returns true if the specified log level should be logged. +func (po *PolicyOptions) ShouldLog(level LogLevel) bool { + if po.pipeline.options.Log.ShouldLog != nil { + return po.pipeline.options.Log.ShouldLog(level) + } + return false +} + +// Log logs a string to the Pipeline's Logger. +func (po *PolicyOptions) Log(level LogLevel, msg string) { + if !po.ShouldLog(level) { + return // Short circuit message formatting if we're not logging it + } + + // We are logging it, ensure trailing newline + if len(msg) == 0 || msg[len(msg)-1] != '\n' { + msg += "\n" // Ensure trailing newline + } + po.pipeline.options.Log.Log(level, msg) + + // If logger doesn't handle fatal/panic, we'll do it here. 
+ if level == LogFatal { + os.Exit(1) + } else if level == LogPanic { + panic(msg) + } +} + +var pipelineHTTPClient = newDefaultHTTPClient() + +func newDefaultHTTPClient() *http.Client { + // We want the Transport to have a large connection pool + return &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + // We use Dial instead of DialContext as DialContext has been reported to cause slower performance. + Dial /*Context*/ : (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, /*Context*/ + MaxIdleConns: 0, // No limit + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableKeepAlives: false, + DisableCompression: false, + MaxResponseHeaderBytes: 0, + //ResponseHeaderTimeout: time.Duration{}, + //ExpectContinueTimeout: time.Duration{}, + }, + } +} + +// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to a Go's default http.Client. +func newDefaultHTTPClientFactory() Factory { + return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc { + return func(ctx context.Context, request Request) (Response, error) { + r, err := pipelineHTTPClient.Do(request.WithContext(ctx)) + if err != nil { + err = NewError(err, "HTTP request failed") + } + return NewHTTPResponse(r), err + } + }) +} + +var mfm = methodFactoryMarker{} // Singleton + +// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any +// MethodMarkerFactory object is replaced with the specified methodFactory object. If nil is passed fro Do's +// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created. +func MethodFactoryMarker() Factory { + return mfm +} + +type methodFactoryMarker struct { +} + +func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy { + panic("methodFactoryMarker policy should have been replaced with a method policy") +} diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go new file mode 100755 index 000000000000..d0bb7740769c --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go @@ -0,0 +1,33 @@ +// +build !windows,!nacl,!plan9 + +package pipeline + +import ( + "log" + "log/syslog" +) + +// ForceLog should rarely be used. It forceable logs an entry to the +// Windows Event Log (on Windows) or to the SysLog (on Linux) +func ForceLog(level LogLevel, msg string) { + if defaultLogger == nil { + return // Return fast if we failed to create the logger. 
+ } + // We are logging it, ensure trailing newline + if len(msg) == 0 || msg[len(msg)-1] != '\n' { + msg += "\n" // Ensure trailing newline + } + switch level { + case LogFatal: + defaultLogger.Fatal(msg) + case LogPanic: + defaultLogger.Panic(msg) + case LogError, LogWarning, LogInfo: + defaultLogger.Print(msg) + } +} + +var defaultLogger = func() *log.Logger { + l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags) + return l +}() diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go new file mode 100755 index 000000000000..cb67398995af --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go @@ -0,0 +1,61 @@ +package pipeline + +import ( + "os" + "syscall" + "unsafe" +) + +// ForceLog should rarely be used. It forceable logs an entry to the +// Windows Event Log (on Windows) or to the SysLog (on Linux) +func ForceLog(level LogLevel, msg string) { + var el eventType + switch level { + case LogError, LogFatal, LogPanic: + el = elError + case LogWarning: + el = elWarning + case LogInfo: + el = elInfo + } + // We are logging it, ensure trailing newline + if len(msg) == 0 || msg[len(msg)-1] != '\n' { + msg += "\n" // Ensure trailing newline + } + reportEvent(el, 0, msg) +} + +type eventType int16 + +const ( + elSuccess eventType = 0 + elError eventType = 1 + elWarning eventType = 2 + elInfo eventType = 4 +) + +var reportEvent = func() func(eventType eventType, eventID int32, msg string) { + advAPI32 := syscall.MustLoadDLL("AdvAPI32.dll") + registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW") + + sourceName, _ := os.Executable() + sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName) + handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16))) + if lastErr == nil { // On error, logging is a no-op + return func(eventType eventType, eventID int32, msg string) {} + } + reportEvent := advAPI32.MustFindProc("ReportEventW") + return func(eventType eventType, eventID int32, msg string) { + s, _ := syscall.UTF16PtrFromString(msg) + _, _, _ = reportEvent.Call( + uintptr(handle), // HANDLE hEventLog + uintptr(eventType), // WORD wType + uintptr(0), // WORD wCategory + uintptr(eventID), // DWORD dwEventID + uintptr(0), // PSID lpUserSid + uintptr(1), // WORD wNumStrings + uintptr(0), // DWORD dwDataSize + uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings + uintptr(0)) // LPVOID lpRawData + } +}() diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go new file mode 100755 index 000000000000..b5ab05f4deee --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go @@ -0,0 +1,161 @@ +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package pipeline implements an HTTP request/response middleware pipeline whose +policy objects mutate an HTTP request's URL, query parameters, and/or headers before +the request is sent over the wire. + +Not all policy objects mutate an HTTP request; some policy objects simply impact the +flow of requests/responses by performing operations such as logging, retry policies, +timeouts, failure injection, and deserialization of response payloads. 
+ +Implementing the Policy Interface + +To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do +method is called when an HTTP request wants to be sent over the network. Your Do method can perform any +operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query +parameters, inject a failure, etc. Your Do method must then forward the HTTP request to next Policy object +in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy +object sends the HTTP request over the network (by calling the HTTPSender's Do method). + +When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response +(in reverse order). The Policy object can log the response, retry the operation if due to a transient failure +or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response +to the code that initiated the original HTTP request. + +Here is a template for how to define a pipeline.Policy object: + + type myPolicy struct { + node PolicyNode + // TODO: Add configuration/setting fields here (if desired)... + } + + func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + // TODO: Mutate/process the HTTP request here... + response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response + // TODO: Mutate/process the HTTP response here... + return response, err // Return response/error to previous Policy + } + +Implementing the Factory Interface + +Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New +method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is +passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and +a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object +passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object. + +Here is a template for how to define a pipeline.Policy object: + + // NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable); + // this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently. + type myPolicyFactory struct { + // TODO: Add any configuration/setting fields if desired... + } + + func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy { + return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)... + } + +Using your Factory and Policy objects via a Pipeline + +To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes +this slice to the pipeline.NewPipeline function. + + func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline + +This function also requires an object implementing the HTTPSender interface. For simple scenarios, +passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually +send the HTTP response over the network. For more advanced scenarios, you can pass your own HTTPSender +object in. 
This allows sharing of http.Client objects or the use of custom-configured http.Client objects +or other objects that can simulate the network requests for testing purposes. + +Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple +wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a +context.Context for cancelling the HTTP request (if desired). + + type Pipeline interface { + Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error) + } + +Do iterates over the slice of Factory objects and tells each one to create its corresponding +Policy object. After the linked-list of Policy objects have been created, Do calls the first +Policy object passing it the Context & HTTP request parameters. These parameters now flow through +all the Policy objects giving each object a chance to look at and/or mutate the HTTP request. +The last Policy object sends the message over the network. + +When the network operation completes, the HTTP response and error return values pass +back through the same Policy objects in reverse order. Most Policy objects ignore the +response/error but some log the result, retry the operation (depending on the exact +reason the operation failed), or deserialize the response's body. Your own Policy +objects can do whatever they like when processing outgoing requests or incoming responses. + +Note that after an I/O request runs to completion, the Policy objects for that request +are garbage collected. However, Pipeline object (like Factory objects) are goroutine-safe allowing +them to be created once and reused over many I/O operations. This allows for efficient use of +memory and also makes them safely usable by multiple goroutines concurrently. + +Inserting a Method-Specific Factory into the Linked-List of Policy Objects + +While Pipeline and Factory objects can be reused over many different operations, it is +common to have special behavior for a specific operation/method. For example, a method +may need to deserialize the response's body to an instance of a specific data type. +To accommodate this, the Pipeline's Do method takes an additional method-specific +Factory object. The Do method tells this Factory to create a Policy object and +injects this method-specific Policy object into the linked-list of Policy objects. + +When creating a Pipeline object, the slice of Factory objects passed must have 1 +(and only 1) entry marking where the method-specific Factory should be injected. +The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function: + + func MethodFactoryMarker() pipeline.Factory + +Creating an HTTP Request Object + +The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct. +Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard +http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function: + + func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error) + +To this function, you must pass a pipeline.RequestOptions that looks like this: + + type RequestOptions struct { + // The readable and seekable stream to be sent to the server as the request's body. + Body io.ReadSeeker + + // The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request. 
+ Progress ProgressReceiver + } + +The method and struct ensure that the request's body stream is a read/seekable stream. +A seekable stream is required so that upon retry, the final Policy object can seek +the stream back to the beginning before retrying the network request and re-uploading the +body. In addition, you can associate a ProgressReceiver callback function which will be +invoked periodically to report progress while bytes are being read from the body stream +and sent over the network. + +Processing the HTTP Response + +When an HTTP response comes in from the network, a reference to Go's http.Response struct is +embedded in a struct that implements the pipeline.Response interface: + + type Response interface { + Response() *http.Response + } + +This interface is returned through all the Policy objects. Each Policy object can call the Response +interface's Response method to examine (or mutate) the embedded http.Response object. + +A Policy object can internally define another struct (implementing the pipeline.Response interface) +that embeds an http.Response and adds additional fields and return this structure to other Policy +objects. This allows a Policy object to deserialize the body to some other struct and return the +original http.Response and the additional struct back through the Policy chain. Other Policy objects +can see the Response but cannot see the additional struct with the deserialized body. After all the +Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method. +The caller of this method can perform a type assertion attempting to get back to the struct type +really returned by the Policy object. If the type assertion is successful, the caller now has +access to both the http.Response and the deserialized struct object.*/ +package pipeline diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go new file mode 100755 index 000000000000..fd008364d633 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go @@ -0,0 +1,121 @@ +package pipeline + +import ( + "fmt" + "runtime" +) + +type causer interface { + Cause() error +} + +// ErrorNode can be an embedded field in a private error object. This field +// adds Program Counter support and a 'cause' (reference to a preceding error). +// When initializing a error type with this embedded field, initialize the +// ErrorNode field by calling ErrorNode{}.Initialize(cause). +type ErrorNode struct { + pc uintptr // Represents a Program Counter that you can get symbols for. + cause error // Refers to the preceding error (or nil) +} + +// Error returns a string with the PC's symbols or "" if the PC is invalid. +// When defining a new error type, have its Error method call this one passing +// it the string representation of the error. +func (e *ErrorNode) Error(msg string) string { + s := "" + if fn := runtime.FuncForPC(e.pc); fn != nil { + file, line := fn.FileLine(e.pc) + s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line) + } + s += msg + "\n\n" + if e.cause != nil { + s += e.cause.Error() + "\n" + } + return s +} + +// Cause returns the error that preceded this error. +func (e *ErrorNode) Cause() error { return e.cause } + +// Temporary returns true if the error occurred due to a temporary condition. 
+func (e ErrorNode) Temporary() bool { + type temporary interface { + Temporary() bool + } + + for err := e.cause; err != nil; { + if t, ok := err.(temporary); ok { + return t.Temporary() + } + + if cause, ok := err.(causer); ok { + err = cause.Cause() + } else { + err = nil + } + } + return false +} + +// Timeout returns true if the error occurred due to time expiring. +func (e ErrorNode) Timeout() bool { + type timeout interface { + Timeout() bool + } + + for err := e.cause; err != nil; { + if t, ok := err.(timeout); ok { + return t.Timeout() + } + + if cause, ok := err.(causer); ok { + err = cause.Cause() + } else { + err = nil + } + } + return false +} + +// Initialize is used to initialize an embedded ErrorNode field. +// It captures the caller's program counter and saves the cause (preceding error). +// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip +// value of 3 is very common; but, depending on your code nesting, you may need +// a different value. +func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode { + // Get the PC of Initialize method's caller. + pc := [1]uintptr{} + _ = runtime.Callers(callersToSkip, pc[:]) + return ErrorNode{pc: pc[0], cause: cause} +} + +// Cause walks all the preceding errors and return the originating error. +func Cause(err error) error { + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} + +// NewError creates a simple string error (like Error.New). But, this +// error also captures the caller's Program Counter and the preceding error. +func NewError(cause error, msg string) error { + return &pcError{ + ErrorNode: ErrorNode{}.Initialize(cause, 3), + msg: msg, + } +} + +// pcError is a simple string error (like error.New) with an ErrorNode (PC & cause). +type pcError struct { + ErrorNode + msg string +} + +// Error satisfies the error interface. It shows the error with Program Counter +// symbols and calls Error on the preceding error so you can see the full error chain. +func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) } diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go new file mode 100755 index 000000000000..efa3c8ed06b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go @@ -0,0 +1,82 @@ +package pipeline + +import "io" + +// ********** The following is common between the request body AND the response body. + +// ProgressReceiver defines the signature of a callback function invoked as progress is reported. +type ProgressReceiver func(bytesTransferred int64) + +// ********** The following are specific to the request body (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type requestBodyProgress struct { + requestBody io.ReadSeeker // Seeking is required to support retries + pr ProgressReceiver +} + +// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream. 
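+//
+// Minimal caller-side sketch (the payload and callback are illustrative only):
+//
+//	payload := bytes.NewReader([]byte("example payload"))
+//	body := NewRequestBodyProgress(payload, func(bytesTransferred int64) {
+//		fmt.Printf("sent %d bytes\n", bytesTransferred)
+//	})
+//	// use body as the request's body; the callback fires as the stream is read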
+func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker { + if pr == nil { + panic("pr must not be nil") + } + return &requestBodyProgress{requestBody: requestBody, pr: pr} +} + +// Read reads a block of data from an inner stream and reports progress +func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) { + n, err = rbp.requestBody.Read(p) + if err != nil { + return + } + // Invokes the user's callback method to report progress + position, err := rbp.requestBody.Seek(0, io.SeekCurrent) + if err != nil { + panic(err) + } + rbp.pr(position) + return +} + +func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return rbp.requestBody.Seek(offset, whence) +} + +// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it. +func (rbp *requestBodyProgress) Close() error { + if c, ok := rbp.requestBody.(io.Closer); ok { + return c.Close() + } + return nil +} + +// ********** The following are specific to the response body (a ReadCloser) + +// This struct is used when sending a body to the network +type responseBodyProgress struct { + responseBody io.ReadCloser + pr ProgressReceiver + offset int64 +} + +// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream. +func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser { + if pr == nil { + panic("pr must not be nil") + } + return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0} +} + +// Read reads a block of data from an inner stream and reports progress +func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) { + n, err = rbp.responseBody.Read(p) + rbp.offset += int64(n) + + // Invokes the user's callback method to report progress + rbp.pr(rbp.offset) + return +} + +func (rbp *responseBodyProgress) Close() error { + return rbp.responseBody.Close() +} diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go new file mode 100755 index 000000000000..1fbe72bd4dd7 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go @@ -0,0 +1,147 @@ +package pipeline + +import ( + "io" + "net/http" + "net/url" + "strconv" +) + +// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods. +type Request struct { + *http.Request +} + +// NewRequest initializes a new HTTP request object with any desired options. +func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) { + // Note: the url is passed by value so that any pipeline operations that modify it do so on a copy. + + // This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now. + request.Request = &http.Request{ + Method: method, + URL: &url, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: url.Host, + } + + if body != nil { + err = request.SetBody(body) + } + return +} + +// SetBody sets the body and content length, assumes body is not nil. 
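+//
+// Sketch of typical use through NewRequest above (the URL and payload are placeholders):
+//
+//	u, _ := url.Parse("https://example.com/resource")
+//	req, err := NewRequest("PUT", *u, bytes.NewReader(payload))
+//	// for a non-nil body, NewRequest calls SetBody, which sets ContentLength,
+//	// the Content-Length header, and a GetBody function used for retries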
+func (r Request) SetBody(body io.ReadSeeker) error { + size, err := body.Seek(0, io.SeekEnd) + if err != nil { + return err + } + + body.Seek(0, io.SeekStart) + r.ContentLength = size + r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)} + + if size != 0 { + r.Body = &retryableRequestBody{body: body} + r.GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) + if err != nil { + return nil, err + } + return r.Body, nil + } + } else { + // in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content + r.Body = http.NoBody + r.GetBody = func() (io.ReadCloser, error) { + return http.NoBody, nil + } + + // close the user-provided empty body + if c, ok := body.(io.Closer); ok { + c.Close() + } + } + + return nil +} + +// Copy makes a copy of an http.Request. Specifically, it makes a deep copy +// of its Method, URL, Host, Proto(Major/Minor), Header. ContentLength, Close, +// RemoteAddr, RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS, +// Cancel, Response, and ctx fields. Copy panics if any of these fields are +// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer. +func (r Request) Copy() Request { + if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil { + panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" + + "TransferEncoding, Form, PostForm, MultipartForm, or Trailer.") + } + copy := *r.Request // Copy the request + urlCopy := *(r.Request.URL) // Copy the URL + copy.URL = &urlCopy + copy.Header = http.Header{} // Copy the header + for k, vs := range r.Header { + for _, value := range vs { + copy.Header.Add(k, value) + } + } + return Request{Request: ©} // Return the copy +} + +func (r Request) close() error { + if r.Body != nil && r.Body != http.NoBody { + c, ok := r.Body.(*retryableRequestBody) + if !ok { + panic("unexpected request body type (should be *retryableReadSeekerCloser)") + } + return c.realClose() + } + return nil +} + +// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. +func (r Request) RewindBody() error { + if r.Body != nil && r.Body != http.NoBody { + s, ok := r.Body.(io.Seeker) + if !ok { + panic("unexpected request body type (should be io.Seeker)") + } + + // Reset the stream back to the beginning + _, err := s.Seek(0, io.SeekStart) + return err + } + return nil +} + +// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type retryableRequestBody struct { + body io.ReadSeeker // Seeking is required to support retries +} + +// Read reads a block of data from an inner stream and reports progress +func (b *retryableRequestBody) Read(p []byte) (n int, err error) { + return b.body.Read(p) +} + +func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return b.body.Seek(offset, whence) +} + +func (b *retryableRequestBody) Close() error { + // We don't want the underlying transport to close the request body on transient failures so this is a nop. + // The pipeline closes the request body upon success. 
+ return nil +} + +func (b *retryableRequestBody) realClose() error { + if c, ok := b.body.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go new file mode 100755 index 000000000000..f2dc164821dc --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go @@ -0,0 +1,74 @@ +package pipeline + +import ( + "bytes" + "fmt" + "net/http" + "sort" + "strings" +) + +// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects. +// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates +// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory). +// The method that injected the method-specific Factory gets this returned Response and performs a type assertion +// to the expected struct and returns the struct to its caller. +type Response interface { + Response() *http.Response +} + +// This is the default struct that has the http.Response. +// A method can replace this struct with its own struct containing an http.Response +// field and any other additional fields. +type httpResponse struct { + response *http.Response +} + +// NewHTTPResponse is typically called by a Policy object to return a Response object. +func NewHTTPResponse(response *http.Response) Response { + return &httpResponse{response: response} +} + +// This method satisfies the public Response interface's Response method +func (r httpResponse) Response() *http.Response { + return r.response +} + +// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are +// not nil, then these are also written into the Buffer. +func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) { + // Write the request into the buffer. + fmt.Fprint(b, " "+request.Method+" "+request.URL.String()+"\n") + writeHeader(b, request.Header) + if response != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n") + writeHeader(b, response.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// formatHeaders appends an HTTP request's or response's header into a Buffer. 
+func writeHeader(b *bytes.Buffer, header map[string][]string) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + // Redact the value of any Authorization header to prevent security information from persisting in logs + value := interface{}("REDACTED") + if !strings.EqualFold(k, "Authorization") { + value = header[k] + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go new file mode 100644 index 000000000000..c4bb62d810b2 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go @@ -0,0 +1,9 @@ +package pipeline + +const ( + // UserAgent is the string to be used in the user agent string when making requests. + UserAgent = "azure-pipeline-go/" + Version + + // Version is the semantic version (see http://semver.org) of the pipeline package. + Version = "0.1.0" +) diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/access_conditions.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/access_conditions.go new file mode 100644 index 000000000000..c7432e41c323 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/access_conditions.go @@ -0,0 +1,67 @@ +package azblob + +import ( + "time" +) + +// HTTPAccessConditions identifies standard HTTP access conditions which you optionally set. +type HTTPAccessConditions struct { + IfModifiedSince time.Time + IfUnmodifiedSince time.Time + IfMatch ETag + IfNoneMatch ETag +} + +// pointers is for internal infrastructure. It returns the fields as pointers. +func (ac HTTPAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) { + if !ac.IfModifiedSince.IsZero() { + ims = &ac.IfModifiedSince + } + if !ac.IfUnmodifiedSince.IsZero() { + ius = &ac.IfUnmodifiedSince + } + if ac.IfMatch != ETagNone { + ime = &ac.IfMatch + } + if ac.IfNoneMatch != ETagNone { + inme = &ac.IfNoneMatch + } + return +} + +// ContainerAccessConditions identifies container-specific access conditions which you optionally set. +type ContainerAccessConditions struct { + HTTPAccessConditions + LeaseAccessConditions +} + +// BlobAccessConditions identifies blob-specific access conditions which you optionally set. +type BlobAccessConditions struct { + HTTPAccessConditions + LeaseAccessConditions + AppendBlobAccessConditions + PageBlobAccessConditions +} + +// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set. +type LeaseAccessConditions struct { + LeaseID string +} + +// pointers is for internal infrastructure. It returns the fields as pointers. +func (ac LeaseAccessConditions) pointers() (leaseID *string) { + if ac.LeaseID != "" { + leaseID = &ac.LeaseID + } + return +} + +/* +// getInt32 is for internal infrastructure. It is used with access condition values where +// 0 (the default setting) is meaningful. The library interprets 0 as do not send the header +// and the privately-storage field in the access condition object is stored as +1 higher than desired. +// THis method returns true, if the value is > 0 (explicitly set) and the stored value - 1 (the set desired value). 
+func getInt32(value int32) (bool, int32) { + return value > 0, value - 1 +} +*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/atomicmorph.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/atomicmorph.go new file mode 100644 index 000000000000..385b0458b3d7 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/atomicmorph.go @@ -0,0 +1,79 @@ +package azblob + +import "sync/atomic" + +// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function. +// The AtomicMorpher callback is passed a startValue and based on this value it returns +// what the new value should be and the result that AtomicMorph should return to its caller. +type AtomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{}) + +// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. +func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{} { + if target == nil || morpher == nil { + panic("target and morpher mut not be nil") + } + for { + currentVal := atomic.LoadInt32(target) + desiredVal, morphResult := morpher(currentVal) + if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) { + return morphResult + } + } +} + +// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function. +// The AtomicMorpher callback is passed a startValue and based on this value it returns +// what the new value should be and the result that AtomicMorph should return to its caller. +type AtomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{}) + +// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. +func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{} { + if target == nil || morpher == nil { + panic("target and morpher mut not be nil") + } + for { + currentVal := atomic.LoadUint32(target) + desiredVal, morphResult := morpher(currentVal) + if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) { + return morphResult + } + } +} + +// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function. +// The AtomicMorpher callback is passed a startValue and based on this value it returns +// what the new value should be and the result that AtomicMorph should return to its caller. +type AtomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{}) + +// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. +func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{} { + if target == nil || morpher == nil { + panic("target and morpher mut not be nil") + } + for { + currentVal := atomic.LoadInt64(target) + desiredVal, morphResult := morpher(currentVal) + if atomic.CompareAndSwapInt64(target, currentVal, desiredVal) { + return morphResult + } + } +} + +// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function. +// The AtomicMorpher callback is passed a startValue and based on this value it returns +// what the new value should be and the result that AtomicMorph should return to its caller. +type AtomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{}) + +// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. 
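+//
+// Illustrative sketch: atomically record the largest value ever observed, the same
+// pattern the azblob stream uploader uses to track its highest block number
+// (candidate is a placeholder for the value being offered):
+//
+//	var max uint64
+//	AtomicMorphUint64(&max, func(startVal uint64) (uint64, interface{}) {
+//		if startVal < candidate {
+//			return candidate, nil
+//		}
+//		return startVal, nil
+//	})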
+func AtomicMorphUint64(target *uint64, morpher AtomicMorpherUint64) interface{} { + if target == nil || morpher == nil { + panic("target and morpher mut not be nil") + } + for { + currentVal := atomic.LoadUint64(target) + desiredVal, morphResult := morpher(currentVal) + if atomic.CompareAndSwapUint64(target, currentVal, desiredVal) { + return morphResult + } + } +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/highlevel.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/highlevel.go new file mode 100644 index 000000000000..aa022826bb18 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/highlevel.go @@ -0,0 +1,510 @@ +package azblob + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "net/http" + + "bytes" + "os" + "sync" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// CommonResponseHeaders returns the headers common to all blob REST API responses. +type CommonResponse interface { + // ETag returns the value for header ETag. + ETag() ETag + + // LastModified returns the value for header Last-Modified. + LastModified() time.Time + + // RequestID returns the value for header x-ms-request-id. + RequestID() string + + // Date returns the value for header Date. + Date() time.Time + + // Version returns the value for header x-ms-version. + Version() string + + // Response returns the raw HTTP response object. + Response() *http.Response +} + +// UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions. +type UploadToBlockBlobOptions struct { + // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL. + Progress pipeline.ProgressReceiver + + // BlobHTTPHeaders indicates the HTTP headers to be associated with the blob. + BlobHTTPHeaders BlobHTTPHeaders + + // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. + Metadata Metadata + + // AccessConditions indicates the access conditions for the block blob. + AccessConditions BlobAccessConditions + + // Parallelism indicates the maximum number of blocks to upload in parallel (0=default) + Parallelism uint16 +} + +// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob. 
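+//
+// Caller-side sketch (ctx, data and containerURL are placeholders built elsewhere):
+//
+//	blobURL := containerURL.NewBlockBlobURL("example.bin")
+//	_, err := UploadBufferToBlockBlob(ctx, data, blobURL, UploadToBlockBlobOptions{
+//		BlockSize:   4 * 1024 * 1024, // upload in 4MB blocks
+//		Parallelism: 4,               // up to 4 blocks in flight at once
+//	})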
+func UploadBufferToBlockBlob(ctx context.Context, b []byte, + blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { + + // Validate parameters and set defaults + if o.BlockSize < 0 || o.BlockSize > BlockBlobMaxUploadBlobBytes { + panic(fmt.Sprintf("BlockSize option must be > 0 and <= %d", BlockBlobMaxUploadBlobBytes)) + } + if o.BlockSize == 0 { + o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified + } + size := int64(len(b)) + + if size <= BlockBlobMaxUploadBlobBytes { + // If the size can fit in 1 Upload call, do it this way + var body io.ReadSeeker = bytes.NewReader(b) + if o.Progress != nil { + body = pipeline.NewRequestBodyProgress(body, o.Progress) + } + return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions) + } + + var numBlocks = uint16(((size - 1) / o.BlockSize) + 1) + if numBlocks > BlockBlobMaxBlocks { + panic(fmt.Sprintf("The buffer's size is too big or the BlockSize is too small; the number of blocks must be <= %d", BlockBlobMaxBlocks)) + } + + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs + progress := int64(0) + progressLock := &sync.Mutex{} + + err := doBatchTransfer(ctx, batchTransferOptions{ + operationName: "UploadBufferToBlockBlob", + transferSize: size, + chunkSize: o.BlockSize, + parallelism: o.Parallelism, + operation: func(offset int64, count int64) error { + // This function is called once per block. + // It is passed this block's offset within the buffer and its count of bytes + // Prepare to read the proper block/section of the buffer + var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count]) + blockNum := offset / o.BlockSize + if o.Progress != nil { + blockProgress := int64(0) + body = pipeline.NewRequestBodyProgress(body, + func(bytesTransferred int64) { + diff := bytesTransferred - blockProgress + blockProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets a progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks + // at the same time causing PutBlockList to get a mix of blocks from all the clients. + blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes()) + _, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions) + return err + }, + }) + if err != nil { + return nil, err + } + // All put blocks were successful, call Put Block List to finalize the blob + return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions) +} + +// UploadFileToBlockBlob uploads a file in blocks to a block blob. +func UploadFileToBlockBlob(ctx context.Context, file *os.File, + blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { + + stat, err := file.Stat() + if err != nil { + return nil, err + } + m := mmf{} // Default to an empty slice; used for 0-size file + if stat.Size() != 0 { + m, err = newMMF(file, false, 0, int(stat.Size())) + if err != nil { + return nil, err + } + defer m.unmap() + } + return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o) +} + +/////////////////////////////////////////////////////////////////////////////// + + +const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB + +// DownloadFromAzureFileOptions identifies options used by the DownloadAzureFileToBuffer and DownloadAzureFileToFile functions. 
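+//
+// Caller-side sketch using DownloadBlobToFile, defined further below (ctx, blobURL
+// and file are placeholders built elsewhere):
+//
+//	err := DownloadBlobToFile(ctx, blobURL, 0, CountToEnd, BlobAccessConditions{}, file,
+//		DownloadFromBlobOptions{Parallelism: 4})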
+type DownloadFromBlobOptions struct { + // BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress pipeline.ProgressReceiver + + // AccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions BlobAccessConditions + + // Parallelism indicates the maximum number of blocks to download in parallel (0=default) + Parallelism uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +// downloadAzureFileToBuffer downloads an Azure file to a buffer with parallel. +func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64, + ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions, + initialDownloadResponse *DownloadResponse) error { + // Validate parameters, and set defaults. + if o.BlockSize < 0 { + panic("BlockSize option must be >= 0") + } + if o.BlockSize == 0 { + o.BlockSize = BlobDefaultDownloadBlockSize + } + + if offset < 0 { + panic("offset option must be >= 0") + } + + if count < 0 { + panic("count option must be >= 0") + } + + if count == CountToEnd { // If size not specified, calculate it + if initialDownloadResponse != nil { + count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it + } else { + // If we don't have the length at all, get it + dr, err := blobURL.Download(ctx, 0, CountToEnd, ac, false) + if err != nil { + return err + } + count = dr.ContentLength() - offset + } + } + + if int64(len(b)) < count { + panic(fmt.Errorf("the buffer's size should be equal to or larger than the request count of bytes: %d", count)) + } + + // Prepare and do parallel download. + progress := int64(0) + progressLock := &sync.Mutex{} + + err := doBatchTransfer(ctx, batchTransferOptions{ + operationName: "downloadBlobToBuffer", + transferSize: count, + chunkSize: o.BlockSize, + parallelism: o.Parallelism, + operation: func(chunkStart int64, count int64) error { + dr, err := blobURL.Download(ctx, chunkStart+ offset, count, ac, false) + body := dr.Body(o.RetryReaderOptionsPerBlock) + if o.Progress != nil { + rangeProgress := int64(0) + body = pipeline.NewResponseBodyProgress( + body, + func(bytesTransferred int64) { + diff := bytesTransferred - rangeProgress + rangeProgress = bytesTransferred + progressLock.Lock() + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + _, err = io.ReadFull(body, b[chunkStart:chunkStart+count]) + body.Close() + return err + }, + }) + if err != nil { + return err + } + return nil +} + +// DownloadAzureFileToBuffer downloads an Azure file to a buffer with parallel. +// Offset and count are optional, pass 0 for both to download the entire blob. +func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64, + ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions) error { + return downloadBlobToBuffer(ctx, blobURL, offset, count, ac, b, o, nil) +} + +// DownloadBlobToFile downloads an Azure file to a local file. +// The file would be truncated if the size doesn't match. +// Offset and count are optional, pass 0 for both to download the entire blob. +func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64, + ac BlobAccessConditions, file *os.File, o DownloadFromBlobOptions) error { + // 1. Validate parameters. 
+ if file == nil { + panic("file must not be nil") + } + + // 2. Calculate the size of the destination file + var size int64 + + if count == CountToEnd { + // Try to get Azure file's size + props, err := blobURL.GetProperties(ctx, ac) + if err != nil { + return err + } + size = props.ContentLength() - offset + } else { + size = count + } + + // 3. Compare and try to resize local file's size if it doesn't match Azure file's size. + stat, err := file.Stat() + if err != nil { + return err + } + if stat.Size() != size { + if err = file.Truncate(size); err != nil { + return err + } + } + + if size > 0 { + // 4. Set mmap and call DownloadAzureFileToBuffer. + m, err := newMMF(file, true, 0, int(size)) + if err != nil { + return err + } + defer m.unmap() + return downloadBlobToBuffer(ctx, blobURL, offset, size, ac, m, o, nil) + } else { // if the blob's size is 0, there is no need in downloading it + return nil + } +} + + +/////////////////////////////////////////////////////////////////////////////// + +// BatchTransferOptions identifies options used by doBatchTransfer. +type batchTransferOptions struct { + transferSize int64 + chunkSize int64 + parallelism uint16 + operation func(offset int64, chunkSize int64) error + operationName string +} + +// doBatchTransfer helps to execute operations in a batch manner. +func doBatchTransfer(ctx context.Context, o batchTransferOptions) error { + // Prepare and do parallel operations. + numChunks := uint16(((o.transferSize - 1) / o.chunkSize) + 1) + operationChannel := make(chan func() error, o.parallelism) // Create the channel that release 'parallelism' goroutines concurrently + operationResponseChannel := make(chan error, numChunks) // Holds each response + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Create the goroutines that process each operation (in parallel). + if o.parallelism == 0 { + o.parallelism = 5 // default parallelism + } + for g := uint16(0); g < o.parallelism; g++ { + //grIndex := g + go func() { + for f := range operationChannel { + //fmt.Printf("[%s] gr-%d start action\n", o.operationName, grIndex) + err := f() + operationResponseChannel <- err + //fmt.Printf("[%s] gr-%d end action\n", o.operationName, grIndex) + } + }() + } + + // Add each chunk's operation to the channel. + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + curChunkSize := o.chunkSize + + if chunkNum == numChunks-1 { // Last chunk + curChunkSize = o.transferSize - (int64(chunkNum) * o.chunkSize) // Remove size of all transferred chunks from total + } + offset := int64(chunkNum) * o.chunkSize + + operationChannel <- func() error { + return o.operation(offset, curChunkSize) + } + } + close(operationChannel) + + // Wait for the operations to complete. 
+ for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + responseError := <-operationResponseChannel + if responseError != nil { + cancel() // As soon as any operation fails, cancel all remaining operation calls + return responseError // No need to process anymore responses + } + } + return nil +} + +//////////////////////////////////////////////////////////////////////////////////////////////// + +type UploadStreamToBlockBlobOptions struct { + BufferSize int + MaxBuffers int + BlobHTTPHeaders BlobHTTPHeaders + Metadata Metadata + AccessConditions BlobAccessConditions +} + +func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL, + o UploadStreamToBlockBlobOptions) (CommonResponse, error) { + result, err := uploadStream(ctx, reader, + UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers}, + &uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()}) + return result.(CommonResponse), err +} + +type uploadStreamToBlockBlobOptions struct { + b BlockBlobURL + o UploadStreamToBlockBlobOptions + blockIDPrefix uuid // UUID used with all blockIDs + maxBlockNum uint32 // defaults to 0 + firstBlock []byte // Used only if maxBlockNum is 0 +} + +func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}, error) { + return nil, nil +} + +func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error { + if num == 0 && len(buffer) < t.o.BufferSize { + // If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation + t.firstBlock = buffer + return nil + } + // Else, upload a staged block... + AtomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) { + // Atomically remember (in t.numBlocks) the maximum block num we've ever seen + if startVal < num { + return num, nil + } + return startVal, nil + }) + blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64() + _, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}) + return err +} + +func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) { + if t.maxBlockNum == 0 { + // If whole payload fits in 1 block (block #0), upload it with 1 I/O operation + return t.b.Upload(ctx, bytes.NewReader(t.firstBlock), + t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions) + } + // Multiple blocks staged, commit them all now + blockID := newUuidBlockID(t.blockIDPrefix) + blockIDs := make([]string, t.maxBlockNum + 1) + for bn := uint32(0); bn <= t.maxBlockNum; bn++ { + blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64() + } + return t.b.CommitBlockList(ctx, blockIDs, t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +type iTransfer interface { + start(ctx context.Context) (interface{}, error) + chunk(ctx context.Context, num uint32, buffer []byte) error + end(ctx context.Context) (interface{}, error) +} + +type UploadStreamOptions struct { + MaxBuffers int + BufferSize int +} + +func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) { + ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything + defer cancel() + wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing + type OutgoingMsg struct { + chunkNum uint32 + buffer []byte + } + + // 
Create a channel to hold the buffers usable for incoming datsa + incoming := make(chan []byte, o.MaxBuffers) + outgoing := make(chan OutgoingMsg, o.MaxBuffers) // Channel holding outgoing buffers + if result, err := t.start(ctx); err != nil { + return result, err + } + + numBuffers := 0 // The number of buffers & out going goroutines created so far + injectBuffer := func() { + // For each Buffer, create it and a goroutine to upload it + incoming <- make([]byte, o.BufferSize) // Add the new buffer to the incoming channel so this goroutine can from the reader into it + numBuffers++ + go func() { + for outgoingMsg := range outgoing { + // Upload the outgoing buffer + err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer) + wg.Done() // Indicate this buffer was sent + if nil != err { + cancel() + } + incoming <- outgoingMsg.buffer // The goroutine reading from the stream can use reuse this buffer now + } + }() + } + injectBuffer() // Create our 1st buffer & outgoing goroutine + + // This goroutine grabs a buffer, reads from the stream into the buffer, + // and inserts the buffer into the outgoing channel to be uploaded + for c := uint32(0); true; c++ { // Iterate once per chunk + var buffer []byte + if numBuffers < o.MaxBuffers { + select { + // We're not at max buffers, see if a previously-created buffer is available + case buffer = <-incoming: + break + default: + // No buffer available; inject a new buffer & go routine to process it + injectBuffer() + buffer = <-incoming // Grab the just-injected buffer + } + } else { + // We are at max buffers, block until we get to reuse one + buffer = <-incoming + } + n, err := io.ReadFull(reader, buffer) + if err != nil { + buffer = buffer[:n] // Make slice match the # of read bytes + } + if len(buffer) > 0 { + // Buffer not empty, upload it + wg.Add(1) // We're posting a buffer to be sent + outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer} + } + if err != nil { // The reader is done, no more outgoing buffers + break + } + } + // NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done + close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty + wg.Wait() // Wait for all pending outgoing messages to complete + // After all blocks uploaded, commit them to the blob & return the result + return t.end(ctx) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/parsing_urls.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/parsing_urls.go new file mode 100644 index 000000000000..e797a59c0b97 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/parsing_urls.go @@ -0,0 +1,111 @@ +package azblob + +import ( + "net/url" + "strings" +) + +const ( + snapshot = "snapshot" + SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" +) + +// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an +// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL(). +// NOTE: Changing any SAS-related field requires computing a new SAS signature. 
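+//
+// Round-trip sketch (the URL below is only an example):
+//
+//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/ReadMe.txt")
+//	parts := NewBlobURLParts(*u)
+//	// parts.ContainerName == "mycontainer", parts.BlobName == "ReadMe.txt"
+//	rebuilt := parts.URL() // reassembles the same URL from its components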
+type BlobURLParts struct { + Scheme string // Ex: "https://" + Host string // Ex: "account.blob.core.windows.net" + ContainerName string // "" if no container + BlobName string // "" if no blob + Snapshot string // "" if not a snapshot + SAS SASQueryParameters + UnparsedParams string +} + +// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object. +func NewBlobURLParts(u url.URL) BlobURLParts { + up := BlobURLParts{ + Scheme: u.Scheme, + Host: u.Host, + } + + // Find the container & blob names (if any) + if u.Path != "" { + path := u.Path + if path[0] == '/' { + path = path[1:] // If path starts with a slash, remove it + } + + // Find the next slash (if it exists) + containerEndIndex := strings.Index(path, "/") + if containerEndIndex == -1 { // Slash not found; path has container name & no blob name + up.ContainerName = path + } else { + up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes + up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash + } + } + + // Convert the query parameters to a case-sensitive map & trim whitespace + paramsMap := u.Query() + + up.Snapshot = "" // Assume no snapshot + if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { + up.Snapshot = snapshotStr[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, snapshot) + } + up.SAS = newSASQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up +} + +type caseInsensitiveValues url.Values // map[string][]string +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false +} + +// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery +// field contains the SAS, snapshot, and unparsed query parameters. +func (up BlobURLParts) URL() url.URL { + path := "" + // Concatenate container & blob names (if they exist) + if up.ContainerName != "" { + path += "/" + up.ContainerName + if up.BlobName != "" { + path += "/" + up.BlobName + } + } + + rawQuery := up.UnparsedParams + + // Concatenate blob snapshot query parameter (if it exists) + if up.Snapshot != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += snapshot + "=" + up.Snapshot + } + sas := up.SAS.Encode() + if sas != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += sas + } + u := url.URL{ + Scheme: up.Scheme, + Host: up.Host, + Path: path, + RawQuery: rawQuery, + } + return u +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/sas_service.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/sas_service.go new file mode 100644 index 000000000000..d0b12bc17a89 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/sas_service.go @@ -0,0 +1,206 @@ +package azblob + +import ( + "bytes" + "fmt" + "strings" + "time" +) + +// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. 
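+//
+// Sketch (credential is a *SharedKeyCredential obtained elsewhere; the protocol
+// constant and field values here are illustrative):
+//
+//	sasParams := BlobSASSignatureValues{
+//		Protocol:      SASProtocolHTTPS,
+//		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
+//		ContainerName: "mycontainer",
+//		BlobName:      "ReadMe.txt",
+//		Permissions:   BlobSASPermissions{Read: true}.String(),
+//	}.NewSASQueryParameters(credential)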
+type BlobSASSignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to SASVersion + Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String() + IPRange IPRange `param:"sip"` + Identifier string `param:"si"` + ContainerName string + BlobName string // Use "" to create a Container SAS + CacheControl string // rscc + ContentDisposition string // rscd + ContentEncoding string // rsce + ContentLanguage string // rscl + ContentType string // rsct +} + +// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce +// the proper SAS query parameters. +func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters { + if sharedKeyCredential == nil { + panic("sharedKeyCredential can't be nil") + } + + resource := "c" + if v.BlobName == "" { + // Make sure the permission characters are in the correct order + perms := &ContainerSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + panic(err) + } + v.Permissions = perms.String() + } else { + resource = "b" + // Make sure the permission characters are in the correct order + perms := &BlobSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + panic(err) + } + v.Permissions = perms.String() + } + if v.Version == "" { + v.Version = SASVersion + } + startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime) + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName), + v.Identifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign) + + p := SASQueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + + // Calculated SAS signature + signature: signature, + } + return p +} + +// getCanonicalName computes the canonical name for a container or blob resource for SAS signing. +func getCanonicalName(account string, containerName string, blobName string) string { + // Container: "/blob/account/containername" + // Blob: "/blob/account/containername/blobname" + elements := []string{"/blob/", account, "/", containerName} + if blobName != "" { + elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) + } + return strings.Join(elements, "") +} + +// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +type ContainerSASPermissions struct { + Read, Add, Create, Write, Delete, List bool +} + +// String produces the SAS permissions string for an Azure Storage container. 
+// Call this method to set BlobSASSignatureValues's Permissions field. +func (p ContainerSASPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// Parse initializes the ContainerSASPermissions's fields from a string. +func (p *ContainerSASPermissions) Parse(s string) error { + *p = ContainerSASPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("Invalid permission: '%v'", r) + } + } + return nil +} + +// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool } + +// String produces the SAS permissions string for an Azure Storage blob. +// Call this method to set BlobSASSignatureValues's Permissions field. +func (p BlobSASPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + return b.String() +} + +// Parse initializes the BlobSASPermissions's fields from a string. +func (p *BlobSASPermissions) Parse(s string) error { + *p = BlobSASPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + default: + return fmt.Errorf("Invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/service_codes_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/service_codes_blob.go new file mode 100644 index 000000000000..d260f8aee5cd --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/service_codes_blob.go @@ -0,0 +1,195 @@ +package azblob + +// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes + +// ServiceCode values indicate a service failure. +const ( + // ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met. + ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet" + + // ServiceCodeBlobAlreadyExists means the specified blob already exists. + ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists" + + // ServiceCodeBlobNotFound means the specified blob does not exist. + ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound" + + // ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken. + ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten" + + // ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length. 
+ ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength" + + // ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks + // or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks. + ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit" + + // ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks. + ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong" + + // ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set. + ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier" + + // ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time. + // Examine the HTTP status code and message for more information about the failure. + ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource" + + // ServiceCodeContainerAlreadyExists means the specified container already exists. + ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists" + + // ServiceCodeContainerBeingDeleted means the specified container is being deleted. + ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted" + + // ServiceCodeContainerDisabled means the specified container has been disabled by the administrator. + ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled" + + // ServiceCodeContainerNotFound means the specified container does not exist. + ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound" + + // ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit. + ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit" + + // ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same. + ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported" + + // ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation. + ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch" + + // ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or + // that the operation for AppendBlob requires at least version 2015-02-21. + ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch" + + // ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob. + ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch" + + // ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob. + ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + + // ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot. + ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot" + + // ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease. 
+ ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired" + + // ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid. + ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock" + + // ServiceCodeInvalidBlobType means the blob type is invalid for this operation. + ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType" + + // ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded. + ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId" + + // ServiceCodeInvalidBlockList means the specified block list is invalid. + ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList" + + // ServiceCodeInvalidOperation means an invalid operation against a blob snapshot. + ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation" + + // ServiceCodeInvalidPageRange means the page range specified is invalid. + ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange" + + // ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation. + ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType" + + // ServiceCodeInvalidSourceBlobURL means the source URL for incremental copy request must be valid Azure Storage blob URL. + ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl" + + // ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19. + ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation" + + // ServiceCodeLeaseAlreadyPresent means there is already a lease present. + ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent" + + // ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again. + ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken" + + // ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob. + ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation" + + // ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container. + ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation" + + // ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container. + ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation" + + // ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request. + ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing" + + // ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken. + ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired" + + // ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed. + ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged" + + // ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed. 
+ ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed" + + // ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired. + ServiceCodeLeaseLost ServiceCodeType = "LeaseLost" + + // ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob. + ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation" + + // ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container. + ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation" + + // ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container. + ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation" + + // ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met. + ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet" + + // ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation. + ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation" + + // ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob. + ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob" + + // ServiceCodePendingCopyOperation means there is currently a pending copy operation. + ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation" + + // ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value. + ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer" + + // ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found. + ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound" + + // ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot. + ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported" + + // ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met. + ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet" + + // ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number. + ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge" + + // ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded. + ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded" + + // ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded. + ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded" + + // ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots. + ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent" + + // ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met. 
+ ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet" + + // ServiceCodeSystemInUse means this blob is in use by the system. + ServiceCodeSystemInUse ServiceCodeType = "SystemInUse" + + // ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met. + ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet" + + // ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites. + ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite" + + // ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated. + ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated" + + // ServiceCodeBlobArchived means this operation is not permitted on an archived blob. + ServiceCodeBlobArchived ServiceCodeType = "BlobArchived" + + // ServiceCodeBlobNotArchived means this blob is currently not in the archived state. + ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived" +) diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_append_blob.go new file mode 100644 index 000000000000..b8711f5a1c43 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_append_blob.go @@ -0,0 +1,112 @@ +package azblob + +import ( + "context" + "io" + "net/url" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock. + AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB + + // AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob. + AppendBlobMaxBlocks = 50000 +) + +// AppendBlobURL defines a set of operations applicable to append blobs. +type AppendBlobURL struct { + BlobURL + abClient appendBlobClient +} + +// NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline. +func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL { + blobClient := newBlobClient(url, p) + abClient := newAppendBlobClient(url, p) + return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient} +} + +// WithPipeline creates a new AppendBlobURL object identical to the source but with the specific request policy pipeline. +func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL { + return NewAppendBlobURL(ab.blobClient.URL(), p) +} + +// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL { + p := NewBlobURLParts(ab.URL()) + p.Snapshot = snapshot + return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline()) +} + +// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. 
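As a hedged illustration of the append-blob surface this file introduces (the Create method implemented just below, plus AppendBlock), here is a minimal sketch, not part of the vendored code. The account, container, and blob names are placeholders, and the `NewPipeline`/`NewAnonymousCredential` helpers are assumed from elsewhere in this SDK drop.

```go
package main

import (
	"context"
	"log"
	"net/url"
	"strings"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint; a real URL would point at an existing storage account.
	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/log.txt")

	// NewPipeline and NewAnonymousCredential are assumed from elsewhere in this SDK drop.
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
	ab := azblob.NewAppendBlobURL(*u, p)

	// Create the 0-length append blob first, then append one block of data to it.
	if _, err := ab.Create(ctx, azblob.BlobHTTPHeaders{ContentType: "text/plain"},
		azblob.Metadata{}, azblob.BlobAccessConditions{}); err != nil {
		log.Fatal(err)
	}
	if _, err := ab.AppendBlock(ctx, strings.NewReader("hello\n"), azblob.BlobAccessConditions{}); err != nil {
		log.Fatal(err)
	}
}
```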
+func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers() + return ab.abClient.Create(ctx, 0, nil, + &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, + &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, + ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil) +} + +// AppendBlock writes a stream to a new block of data to the end of the existing append blob. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block. +func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac BlobAccessConditions) (*AppendBlobAppendBlockResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendBlobAccessConditions.pointers() + return ab.abClient.AppendBlock(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil, + ac.LeaseAccessConditions.pointers(), + ifMaxSizeLessThanOrEqual, ifAppendPositionEqual, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// AppendBlobAccessConditions identifies append blob-specific access conditions which you optionally set. +type AppendBlobAccessConditions struct { + // IfAppendPositionEqual ensures that the AppendBlock operation succeeds + // only if the append position is equal to a value. + // IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified. + // IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value + // IfAppendPositionEqual==-1 means IfAppendPositionEqual' header specified with a value of 0 + IfAppendPositionEqual int64 + + // IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds + // only if the append blob's size is less than or equal to a value. + // IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified. + // IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value + // IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0 + IfMaxSizeLessThanOrEqual int64 +} + +// pointers is for internal infrastructure. It returns the fields as pointers. 
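Before the internal pointers helper below, a caller-side sketch of the 0/-1 sentinel convention documented in the AppendBlobAccessConditions struct above; this is purely illustrative and uses only types shown in this file.

```go
package example

import "github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"

// exampleAppendConditions shows how the 0 / -1 sentinel convention reads from the caller's side.
func exampleAppendConditions() (onlyIfEmpty, capAt4MB azblob.BlobAccessConditions) {
	onlyIfEmpty = azblob.BlobAccessConditions{
		AppendBlobAccessConditions: azblob.AppendBlobAccessConditions{
			// -1 means: send the condition with a literal value of 0, i.e. append only at offset 0.
			IfAppendPositionEqual: -1,
		},
	}
	capAt4MB = azblob.BlobAccessConditions{
		AppendBlobAccessConditions: azblob.AppendBlobAccessConditions{
			// >0 sends this value as the condition; leaving the field 0 omits the condition entirely.
			IfMaxSizeLessThanOrEqual: 4 * 1024 * 1024,
		},
	}
	return
}
```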
+func (ac AppendBlobAccessConditions) pointers() (iape *int64, imsltoe *int64) { + if ac.IfAppendPositionEqual < -1 { + panic("IfAppendPositionEqual can't be less than -1") + } + if ac.IfMaxSizeLessThanOrEqual < -1 { + panic("IfMaxSizeLessThanOrEqual can't be less than -1") + } + var zero int64 // defaults to 0 + switch ac.IfAppendPositionEqual { + case -1: + iape = &zero + case 0: + iape = nil + default: + iape = &ac.IfAppendPositionEqual + } + + switch ac.IfMaxSizeLessThanOrEqual { + case -1: + imsltoe = &zero + case 0: + imsltoe = nil + default: + imsltoe = &ac.IfMaxSizeLessThanOrEqual + } + return +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_blob.go new file mode 100644 index 000000000000..96d85cbfd8d3 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_blob.go @@ -0,0 +1,222 @@ +package azblob + +import ( + "context" + "net/url" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. +type BlobURL struct { + blobClient blobClient +} + +// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline. +func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL { + if p == nil { + panic("p can't be nil") + } + blobClient := newBlobClient(url, p) + return BlobURL{blobClient: blobClient} +} + +// URL returns the URL endpoint used by the BlobURL object. +func (b BlobURL) URL() url.URL { + return b.blobClient.URL() +} + +// String returns the URL as a string. +func (b BlobURL) String() string { + u := b.URL() + return u.String() +} + +// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline. +func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL { + if p == nil { + panic("p can't be nil") + } + return NewBlobURL(b.blobClient.URL(), p) +} + +// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (b BlobURL) WithSnapshot(snapshot string) BlobURL { + p := NewBlobURLParts(b.URL()) + p.Snapshot = snapshot + return NewBlobURL(p.URL(), b.blobClient.Pipeline()) +} + +// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline. +func (b BlobURL) ToAppendBlobURL() AppendBlobURL { + return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline()) +} + +// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline. +func (b BlobURL) ToBlockBlobURL() BlockBlobURL { + return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline()) +} + +// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline. +func (b BlobURL) ToPageBlobURL() PageBlobURL { + return NewPageBlobURL(b.URL(), b.blobClient.Pipeline()) +} + +// DownloadBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. 
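A minimal read-side sketch of the Download method implemented just below. It is an assumption-laden example rather than documented usage: it relies on the package's `NewPipeline` helper, on the `CountToEnd` (0) convention mentioned later in this diff, and on a `DownloadResponse.Body(RetryReaderOptions{})` accessor defined elsewhere in the SDK.

```go
package main

import (
	"context"
	"io"
	"log"
	"net/url"
	"os"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

func main() {
	ctx := context.Background()
	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/readme.txt") // placeholder

	// NewPipeline/NewAnonymousCredential are assumed from elsewhere in this SDK drop.
	blob := azblob.NewBlobURL(*u, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}))

	// Download the whole blob (offset 0, count 0 meaning "to the end"), without a range MD5.
	dr, err := blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
	if err != nil {
		log.Fatal(err)
	}

	// Body wraps the response stream with retries; assumed to be defined elsewhere in the SDK.
	body := dr.Body(azblob.RetryReaderOptions{})
	defer body.Close()
	if _, err := io.Copy(os.Stdout, body); err != nil {
		log.Fatal(err)
	}
}
```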
+func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) { + var xRangeGetContentMD5 *bool + if rangeGetContentMD5 { + xRangeGetContentMD5 = &rangeGetContentMD5 + } + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + dr, err := b.blobClient.Download(ctx, nil, nil, + httpRange{offset: offset, count: count}.pointers(), + ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + if err != nil { + return nil, err + } + return &DownloadResponse{ + b: b, + r: dr, + ctx: ctx, + getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()}, + }, err +} + +// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) { + return b.blobClient.Undelete(ctx, nil, nil) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType) (*BlobSetTierResponse, error) { + return b.blobClient.SetTier(ctx, tier, nil, nil) +} + +// GetBlobProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// SetBlobHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
+func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return b.blobClient.SetHTTPHeaders(ctx, nil, + &h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage, + ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + &h.ContentDisposition, nil) +} + +// SetBlobMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobCreateSnapshotResponse, error) { + // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter + // because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this + // performance hit. + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil) +} + +// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between +// 15 to 60 seconds, or infinite (-1). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*BlobAcquireLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() + return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// RenewLease renews the blob's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobRenewLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() + return b.blobClient.RenewLease(ctx, leaseID, nil, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// ReleaseLease releases the blob's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
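A hedged sketch of the lease lifecycle (AcquireLease and RenewLease above, ReleaseLease just below). The proposed lease ID is a placeholder GUID, and the `LeaseID()` accessor on the response is assumed from the generated model types that are not shown in this diff.

```go
package example

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

// leaseAndRelease acquires a 15-second lease on b, renews it once, then releases it.
func leaseAndRelease(ctx context.Context, b azblob.BlobURL) error {
	const proposed = "c0a7bb42-2a4d-45f8-8f16-1f0fd0e0f0aa" // any client-chosen GUID (placeholder)

	resp, err := b.AcquireLease(ctx, proposed, 15, azblob.HTTPAccessConditions{})
	if err != nil {
		return err
	}
	leaseID := resp.LeaseID() // assumed accessor on the generated response type

	// Renew before the 15-second window elapses...
	if _, err := b.RenewLease(ctx, leaseID, azblob.HTTPAccessConditions{}); err != nil {
		return err
	}
	// ...and release once the protected work is done.
	_, err = b.ReleaseLease(ctx, leaseID, azblob.HTTPAccessConditions{})
	return err
}
```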
+func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobReleaseLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() + return b.blobClient.ReleaseLease(ctx, leaseID, nil, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) +// constant to break a fixed-duration lease when it expires or an infinite lease immediately. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac HTTPAccessConditions) (*BlobBreakLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() + return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// ChangeLease changes the blob's lease ID. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*BlobChangeLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() + return b.blobClient.ChangeLease(ctx, leaseID, proposedID, + nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics. +const LeaseBreakNaturally = -1 + +func leasePeriodPointer(period int32) (p *int32) { + if period != LeaseBreakNaturally { + p = &period + } + return nil +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac BlobAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) { + srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.HTTPAccessConditions.pointers() + dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.HTTPAccessConditions.pointers() + srcLeaseID := srcac.LeaseAccessConditions.pointers() + dstLeaseID := dstac.LeaseAccessConditions.pointers() + + return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata, + srcIfModifiedSince, srcIfUnmodifiedSince, + srcIfMatchETag, srcIfNoneMatchETag, + dstIfModifiedSince, dstIfUnmodifiedSince, + dstIfMatchETag, dstIfNoneMatchETag, + dstLeaseID, srcLeaseID, nil) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. 
+func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) { + return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_block_blob.go new file mode 100644 index 000000000000..ec7028558763 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_block_blob.go @@ -0,0 +1,157 @@ +package azblob + +import ( + "context" + "io" + "net/url" + + "encoding/base64" + "encoding/binary" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // BlockBlobMaxPutBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. + BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB + + // BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. + BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB + + // BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob. + BlockBlobMaxBlocks = 50000 +) + +// BlockBlobURL defines a set of operations applicable to block blobs. +type BlockBlobURL struct { + BlobURL + bbClient blockBlobClient +} + +// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline. +func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL { + if p == nil { + panic("p can't be nil") + } + blobClient := newBlobClient(url, p) + bbClient := newBlockBlobClient(url, p) + return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient} +} + +// WithPipeline creates a new BlockBlobURL object identical to the source but with the specific request policy pipeline. +func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL { + return NewBlockBlobURL(bb.blobClient.URL(), p) +} + +// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL { + p := NewBlobURLParts(bb.URL()) + p.Snapshot = snapshot + return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline()) +} + +// Upload creates a new block blob or overwrites an existing block blob. +// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not +// supported with Upload; the content of the existing blob is overwritten with the new content. To +// perform a partial update of a block blob, use StageBlock and CommitBlockList. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. 
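A minimal sketch of the one-shot Upload method implemented just below, suitable for small payloads; larger data would go through StageBlock/CommitBlockList (see the next sketch). The URL is a placeholder and `NewPipeline`/`NewAnonymousCredential` are assumed from elsewhere in this SDK drop.

```go
package main

import (
	"context"
	"log"
	"net/url"
	"strings"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

func main() {
	ctx := context.Background()
	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/hello.txt") // placeholder

	// NewPipeline/NewAnonymousCredential are assumed from elsewhere in this SDK drop.
	bb := azblob.NewBlockBlobURL(*u, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}))

	// One-shot upload of a small, seekable payload; the whole blob is replaced in a single call.
	_, err := bb.Upload(ctx, strings.NewReader("hello, world\n"),
		azblob.BlobHTTPHeaders{ContentType: "text/plain"}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}
}
```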
+func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return bb.bbClient.Upload(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil, + &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, + &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), + &h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil) +} + +// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. +func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions) (*BlockBlobStageBlockResponse, error) { + return bb.bbClient.StageBlock(ctx, base64BlockID, validateSeekableStreamAt0AndGetCount(body), body, nil, ac.pointers(), nil) +} + +// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. +// If count is CountToEnd (0), then data is read from specified offset to the end. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. +func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, ac LeaseAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) { + sourceURLStr := sourceURL.String() + return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, &sourceURLStr, httpRange{offset: offset, count: count}.pointers(), nil, nil, ac.pointers(), nil) +} + +// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written +// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob +// by uploading only those blocks that have changed, then committing the new and existing +// blocks together. Any blocks not specified in the block list and permanently deleted. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list. +func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, + metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil, + &h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, + metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. 
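Before the GetBlockList implementation below, a sketch of the StageBlock/CommitBlockList flow defined above: chunks are staged as blocks, then the committed block list makes them visible as the blob's content. The chunking helper and its names are illustrative, not part of the SDK.

```go
package example

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

// stagedUpload splits data into fixed-size chunks, stages each chunk as a block,
// then commits the block list so the staged blocks become the blob's content.
func stagedUpload(ctx context.Context, bb azblob.BlockBlobURL, data []byte, chunkSize int) error {
	var ids []string
	for i, off := 0, 0; off < len(data); i, off = i+1, off+chunkSize {
		end := off + chunkSize
		if end > len(data) {
			end = len(data)
		}
		// Block IDs must be Base64-encoded and, before encoding, all of equal length.
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%08d", i)))
		if _, err := bb.StageBlock(ctx, id, bytes.NewReader(data[off:end]), azblob.LeaseAccessConditions{}); err != nil {
			return err
		}
		ids = append(ids, id)
	}
	// Committing the list is what turns the staged blocks into the readable blob.
	_, err := bb.CommitBlockList(ctx, ids, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	return err
}
```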
+func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) { + return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil) +} + +////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +type BlockID [64]byte + +func (blockID BlockID) ToBase64() string { + return base64.StdEncoding.EncodeToString(blockID[:]) +} + +func (blockID *BlockID) FromBase64(s string) error { + *blockID = BlockID{} // Zero out the block ID + _, err := base64.StdEncoding.Decode(blockID[:], ([]byte)(s)) + return err +} + +////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +type uuidBlockID BlockID + +func (ubi uuidBlockID) UUID() uuid { + u := uuid{} + copy(u[:], ubi[:len(u)]) + return u +} + +func (ubi uuidBlockID) Number() uint32 { + return binary.BigEndian.Uint32(ubi[len(uuid{}):]) +} + +func newUuidBlockID(u uuid) uuidBlockID { + ubi := uuidBlockID{} // Create a new uuidBlockID + copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it + // Block number defaults to 0 + return ubi +} + +func (ubi *uuidBlockID) SetUUID(u uuid) *uuidBlockID { + copy(ubi[:len(u)], u[:]) + return ubi +} + +func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID { + binary.BigEndian.PutUint32(ubi[len(uuid{}):], blockNumber) // Put block number after UUID + return ubi // Return the passed-in copy +} + +func (ubi uuidBlockID) ToBase64() string { + return BlockID(ubi).ToBase64() +} + +func (ubi *uuidBlockID) FromBase64(s string) error { + return (*BlockID)(ubi).FromBase64(s) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_container.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_container.go new file mode 100644 index 000000000000..0fad9f07674e --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_container.go @@ -0,0 +1,300 @@ +package azblob + +import ( + "bytes" + "context" + "fmt" + "net/url" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs. +type ContainerURL struct { + client containerClient +} + +// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline. +func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL { + if p == nil { + panic("p can't be nil") + } + client := newContainerClient(url, p) + return ContainerURL{client: client} +} + +// URL returns the URL endpoint used by the ContainerURL object. +func (c ContainerURL) URL() url.URL { + return c.client.URL() +} + +// String returns the URL as a string. +func (c ContainerURL) String() string { + u := c.URL() + return u.String() +} + +// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline. +func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL { + return NewContainerURL(c.URL(), p) +} + +// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's +// NewBlobURL method. 
+func (c ContainerURL) NewBlobURL(blobName string) BlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewBlobURL(blobURL, c.client.Pipeline()) +} + +// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's +// NewAppendBlobURL method. +func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewAppendBlobURL(blobURL, c.client.Pipeline()) +} + +// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's +// NewBlockBlobURL method. +func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewBlockBlobURL(blobURL, c.client.Pipeline()) +} + +// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's +// NewPageBlobURL method. +func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewPageBlobURL(blobURL, c.client.Pipeline()) +} + +// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. +func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) { + return c.client.Create(ctx, nil, metadata, publicAccessType, nil) +} + +// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. +func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) { + if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { + panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service") + } + + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers() + return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, nil) +} + +// GetProperties returns the container's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. 
+func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) { + // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. + // This allows us to not expose a GetProperties method at all simplifying the API. + return c.client.GetProperties(ctx, nil, ac.pointers(), nil) +} + +// SetMetadata sets the container's metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata. +func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) { + if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { + panic("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service") + } + ifModifiedSince, _, _, _ := ac.HTTPAccessConditions.pointers() + return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil) +} + +// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl. +func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) { + return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil) +} + +// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission struct { + Read, Add, Create, Write, Delete, List bool +} + +// String produces the access policy permission string for an Azure Storage container. +// Call this method to set AccessPolicy's Permission field. +func (p AccessPolicyPermission) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// Parse initializes the AccessPolicyPermission's fields from a string. +func (p *AccessPolicyPermission) Parse(s string) error { + *p = AccessPolicyPermission{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} + +// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. 
+func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier, + ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) { + if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { + panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service") + } + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers() + return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(), + accessType, ifModifiedSince, ifUnmodifiedSince, nil) +} + +// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*ContainerAcquireLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() + return c.client.AcquireLease(ctx, nil, &duration, &proposedID, + ifModifiedSince, ifUnmodifiedSince, nil) +} + +// RenewLease renews the container's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerRenewLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() + return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil) +} + +// ReleaseLease releases the container's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerReleaseLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() + return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil) +} + +// BreakLease breaks the container's previously-acquired lease (if it exists). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac HTTPAccessConditions) (*ContainerBreakLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() + return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil) +} + +// ChangeLease changes the container's lease ID. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*ContainerChangeLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() + return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil) +} + +// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty +// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. +// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the the +// previously-returned Marker) to get the next segment. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. 
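Before the ListBlobsFlatSegment implementation below, a sketch of the marker loop its doc comment describes. `Marker.NotDone`, `resp.NextMarker`, and `resp.Segment.BlobItems` are assumed from parts of the SDK not shown in this diff.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

// listAllBlobs walks every segment of a container's flat blob listing, one page at a time.
func listAllBlobs(ctx context.Context, c azblob.ContainerURL) error {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := c.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{
			Details: azblob.BlobListingDetails{Metadata: true}, // also return each blob's metadata
		})
		if err != nil {
			return err
		}
		for _, blob := range resp.Segment.BlobItems {
			fmt.Println(blob.Name)
		}
		marker = resp.NextMarker // empty once the listing is exhausted
	}
	return nil
}
```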
+func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) { + prefix, include, maxResults := o.pointers() + return c.client.ListBlobFlatSegment(ctx, prefix, marker.val, maxResults, include, nil, nil) +} + +// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty +// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. +// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the the +// previously-returned Marker) to get the next segment. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. +func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) { + if o.Details.Snapshots { + panic("snapshots are not supported in this listing operation") + } + prefix, include, maxResults := o.pointers() + return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.val, maxResults, include, nil, nil) +} + +// ListBlobsSegmentOptions defines options available when calling ListBlobs. +type ListBlobsSegmentOptions struct { + Details BlobListingDetails // No IncludeType header is produced if "" + Prefix string // No Prefix header is produced if "" + + // SetMaxResults sets the maximum desired results you want the service to return. Note, the + // service may return fewer results than requested. + // MaxResults=0 means no 'MaxResults' header specified. + MaxResults int32 +} + +func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) { + if o.Prefix != "" { + prefix = &o.Prefix + } + include = o.Details.slice() + if o.MaxResults != 0 { + if o.MaxResults < 0 { + panic("MaxResults must be >= 0") + } + maxResults = &o.MaxResults + } + return +} + +// BlobListingDetails indicates what additional information the service should return with each blob. +type BlobListingDetails struct { + Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool +} + +// string produces the Include query parameter's value. +func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType { + items := []ListBlobsIncludeItemType{} + // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails! + if d.Copy { + items = append(items, ListBlobsIncludeItemCopy) + } + if d.Deleted { + items = append(items, ListBlobsIncludeItemDeleted) + } + if d.Metadata { + items = append(items, ListBlobsIncludeItemMetadata) + } + if d.Snapshots { + items = append(items, ListBlobsIncludeItemSnapshots) + } + if d.UncommittedBlobs { + items = append(items, ListBlobsIncludeItemUncommittedblobs) + } + return items +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_page_blob.go new file mode 100644 index 000000000000..fa87ef8527e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_page_blob.go @@ -0,0 +1,236 @@ +package azblob + +import ( + "context" + "fmt" + "io" + "net/url" + "strconv" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // PageBlobPageBytes indicates the number of bytes in a page (512). 
+ PageBlobPageBytes = 512 + + // PageBlobMaxPutPagesBytes indicates the maximum number of bytes that can be sent in a call to PutPage. + PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB +) + +// PageBlobURL defines a set of operations applicable to page blobs. +type PageBlobURL struct { + BlobURL + pbClient pageBlobClient +} + +// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline. +func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL { + if p == nil { + panic("p can't be nil") + } + blobClient := newBlobClient(url, p) + pbClient := newPageBlobClient(url, p) + return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient} +} + +// WithPipeline creates a new PageBlobURL object identical to the source but with the specific request policy pipeline. +func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL { + return NewPageBlobURL(pb.blobClient.URL(), p) +} + +// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL { + p := NewBlobURLParts(pb.URL()) + p.Snapshot = snapshot + return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline()) +} + +// CreatePageBlob creates a page blob of the specified length. Call PutPage to upload data data to a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) { + if sequenceNumber < 0 { + panic("sequenceNumber must be greater than or equal to 0") + } + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return pb.pbClient.Create(ctx, 0, nil, + &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl, + metadata, ac.LeaseAccessConditions.pointers(), + &h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &size, &sequenceNumber, nil) +} + +// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac BlobAccessConditions) (*PageBlobUploadPagesResponse, error) { + count := validateSeekableStreamAt0AndGetCount(body) + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers() + return pb.pbClient.UploadPages(ctx, body, count, nil, + PageRange{Start: offset, End: offset + count - 1}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// ClearPages frees the specified pages from the page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. 
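Before the ClearPages implementation below, a sketch of the Create/UploadPages methods defined above: the blob size, page offsets, and payload lengths must all be multiples of PageBlobPageBytes (512). The helper name and sizes are illustrative.

```go
package example

import (
	"bytes"
	"context"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

// writeFirstPage creates a 1 MiB page blob and writes a single 512-byte page at offset 0.
func writeFirstPage(ctx context.Context, pb azblob.PageBlobURL) error {
	const size = 1024 * 1024 // total blob size; must be a multiple of 512
	if _, err := pb.Create(ctx, size, 0, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}); err != nil {
		return err
	}
	page := make([]byte, azblob.PageBlobPageBytes) // exactly one 512-byte page
	copy(page, []byte("page 0"))
	_, err := pb.UploadPages(ctx, 0, bytes.NewReader(page), azblob.BlobAccessConditions{})
	return err
}
```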
+func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageBlobClearPagesResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers() + return pb.pbClient.ClearPages(ctx, 0, nil, + PageRange{Start: offset, End: offset + count - 1}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, + ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return pb.pbClient.GetPageRanges(ctx, nil, nil, + httpRange{offset: offset, count: count}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot, + httpRange{offset: offset, count: count}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil) +} + +// Resize resizes the page blob to the specified size (which must be a multiple of 512). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) { + if size%PageBlobPageBytes != 0 { + panic("Size must be a multiple of PageBlobPageBytes (512)") + } + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +// SetSequenceNumber sets the page blob's sequence number. 
+func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64, + ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) { + if sequenceNumber < 0 { + panic("sequenceNumber must be greater than or equal to 0") + } + sn := &sequenceNumber + if action == SequenceNumberActionIncrement { + sn = nil + } + ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers() + return pb.pbClient.UpdateSequenceNumber(ctx, action, nil, + ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, + sn, nil) +} + +// StartIncrementalCopy begins an operation to start an incremental copy from one page blob's snapshot to this page blob. +// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. +// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and +// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots. +func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() + qp := source.Query() + qp.Set("snapshot", snapshot) + source.RawQuery = qp.Encode() + return pb.pbClient.CopyIncremental(ctx, source.String(), nil, nil, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) +} + +func (pr PageRange) pointers() *string { + if pr.Start < 0 { + panic("PageRange's Start value must be greater than or equal to 0") + } + if pr.End <= 0 { + panic("PageRange's End value must be greater than 0") + } + if pr.Start%PageBlobPageBytes != 0 { + panic("PageRange's Start value must be a multiple of 512") + } + if pr.End%PageBlobPageBytes != (PageBlobPageBytes - 1) { + panic("PageRange's End value must be 1 less than a multiple of 512") + } + if pr.End <= pr.Start { + panic("PageRange's End value must be after the start") + } + endOffset := strconv.FormatInt(int64(pr.End), 10) + asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset) + return &asString +} + +// PageBlobAccessConditions identifies page blob-specific access conditions which you optionally set. +type PageBlobAccessConditions struct { + // IfSequenceNumberLessThan ensures that the page blob operation succeeds + // only if the blob's sequence number is less than a value. + // IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified. + // IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value + // IfSequenceNumberLessThan==-1 means 'IfSequenceNumberLessThan' header specified with a value of 0 + IfSequenceNumberLessThan int64 + + // IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds + // only if the blob's sequence number is less than or equal to a value. + // IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified. 
+ // IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value + // IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0 + IfSequenceNumberLessThanOrEqual int64 + + // IfSequenceNumberEqual ensures that the page blob operation succeeds + // only if the blob's sequence number is equal to a value. + // IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified. + // IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value + // IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0 + IfSequenceNumberEqual int64 +} + +// pointers is for internal infrastructure. It returns the fields as pointers. +func (ac PageBlobAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) { + if ac.IfSequenceNumberLessThan < -1 { + panic("Ifsequencenumberlessthan can't be less than -1") + } + if ac.IfSequenceNumberLessThanOrEqual < -1 { + panic("IfSequenceNumberLessThanOrEqual can't be less than -1") + } + if ac.IfSequenceNumberEqual < -1 { + panic("IfSequenceNumberEqual can't be less than -1") + } + + var zero int64 // Defaults to 0 + switch ac.IfSequenceNumberLessThan { + case -1: + snlt = &zero + case 0: + snlt = nil + default: + snlt = &ac.IfSequenceNumberLessThan + } + + switch ac.IfSequenceNumberLessThanOrEqual { + case -1: + snltoe = &zero + case 0: + snltoe = nil + default: + snltoe = &ac.IfSequenceNumberLessThanOrEqual + } + switch ac.IfSequenceNumberEqual { + case -1: + sne = &zero + case 0: + sne = nil + default: + sne = &ac.IfSequenceNumberEqual + } + return +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_service.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_service.go new file mode 100644 index 000000000000..d49a20846b3e --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_service.go @@ -0,0 +1,140 @@ +package azblob + +import ( + "context" + "net/url" + "strings" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. + ContainerNameRoot = "$root" + + // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. + ContainerNameLogs = "$logs" +) + +// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers. +type ServiceURL struct { + client serviceClient +} + +// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline. +func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL { + if p == nil { + panic("p can't be nil") + } + client := newServiceClient(primaryURL, p) + return ServiceURL{client: client} +} + +// URL returns the URL endpoint used by the ServiceURL object. +func (s ServiceURL) URL() url.URL { + return s.client.URL() +} + +// String returns the URL as a string. +func (s ServiceURL) String() string { + u := s.URL() + return u.String() +} + +// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline. +func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL { + return NewServiceURL(s.URL(), p) +} + +// NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of +// ServiceURL's URL. 
The new ContainerURL uses the same request policy pipeline as the ServiceURL. +// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's +// NewContainerURL method. +func (s ServiceURL) NewContainerURL(containerName string) ContainerURL { + containerURL := appendToURLPath(s.URL(), containerName) + return NewContainerURL(containerURL, s.client.Pipeline()) +} + +// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required) +func appendToURLPath(u url.URL, name string) url.URL { + // e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f" + // When you call url.Parse() this is what you'll get: + // Scheme: "https" + // Opaque: "" + // User: nil + // Host: "ms.com" + // Path: "/a/b/" This should start with a / and it might or might not have a trailing slash + // RawPath: "" + // ForceQuery: false + // RawQuery: "k1=v1&k2=v2" + // Fragment: "f" + if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' { + u.Path += "/" // Append "/" to end before appending name + } + u.Path += name + return u +} + +// ListContainersFlatSegment returns a single segment of containers starting from the specified Marker. Use an empty +// Marker to start enumeration from the beginning. Container names are returned in lexicographic order. +// After getting a segment, process it, and then call ListContainersFlatSegment again (passing the the +// previously-returned Marker) to get the next segment. For more information, see +// https://docs.microsoft.com/rest/api/storageservices/list-containers2. +func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersResponse, error) { + prefix, include, maxResults := o.pointers() + return s.client.ListContainersSegment(ctx, prefix, marker.val, maxResults, include, nil, nil) +} + +// ListContainersOptions defines options available when calling ListContainers. +type ListContainersSegmentOptions struct { + Detail ListContainersDetail // No IncludeType header is produced if "" + Prefix string // No Prefix header is produced if "" + MaxResults int32 // 0 means unspecified + // TODO: update swagger to generate this type? +} + +func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) { + if o.Prefix != "" { + prefix = &o.Prefix + } + if o.MaxResults != 0 { + if o.MaxResults < 0 { + panic("MaxResults must be >= 0") + } + maxResults = &o.MaxResults + } + include = ListContainersIncludeType(o.Detail.string()) + return +} + +// ListContainersFlatDetail indicates what additional information the service should return with each container. +type ListContainersDetail struct { + // Tells the service whether to return metadata for each container. + Metadata bool +} + +// string produces the Include query parameter's value. +func (d *ListContainersDetail) string() string { + items := make([]string, 0, 1) + // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails! 
+ if d.Metadata { + items = append(items, string(ListContainersIncludeMetadata)) + } + if len(items) > 0 { + return strings.Join(items, ",") + } + return string(ListContainersIncludeNone) +} + +func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) { + return bsu.client.GetProperties(ctx, nil, nil) +} + +func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) { + return bsu.client.SetProperties(ctx, properties, nil, nil) +} + +func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) { + return bsu.client.GetStatistics(ctx, nil, nil) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/version.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/version.go new file mode 100644 index 000000000000..8a8926bd33cc --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/version.go @@ -0,0 +1,3 @@ +package azblob + +const serviceLibVersion = "0.1" diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_credential_anonymous.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_credential_anonymous.go new file mode 100644 index 000000000000..a81987d54a30 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_credential_anonymous.go @@ -0,0 +1,55 @@ +package azblob + +import ( + "context" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// Credential represent any credential type; it is used to create a credential policy Factory. +type Credential interface { + pipeline.Factory + credentialMarker() +} + +type credentialFunc pipeline.FactoryFunc + +func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return f(next, po) +} + +// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. +func (credentialFunc) credentialMarker() {} + +////////////////////////////// + +// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resource +// or for use with Shared Access Signatures (SAS). +func NewAnonymousCredential() Credential { + return anonymousCredentialFactory +} + +var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton + +// anonymousCredentialPolicyFactory is the credential's policy factory. +type anonymousCredentialPolicyFactory struct { +} + +// New creates a credential policy object. +func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return &anonymousCredentialPolicy{next: next} +} + +// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. +func (*anonymousCredentialPolicyFactory) credentialMarker() {} + +// anonymousCredentialPolicy is the credential's policy object. +type anonymousCredentialPolicy struct { + next pipeline.Policy +} + +// Do implements the credential's policy interface. 
+func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + // For anonymous credentials, this is effectively a no-op + return p.next.Do(ctx, request) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_credential_shared_key.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_credential_shared_key.go new file mode 100644 index 000000000000..51da1627797a --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_credential_shared_key.go @@ -0,0 +1,187 @@ +package azblob + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) *SharedKeyCredential { + bytes, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + panic(err) + } + return &SharedKeyCredential{accountName: accountName, accountKey: bytes} +} + +// SharedKeyCredential contains an account's name and its primary or secondary key. +// It is immutable making it shareable and goroutine-safe. +type SharedKeyCredential struct { + // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only + accountName string + accountKey []byte +} + +// AccountName returns the Storage account's name. +func (f SharedKeyCredential) AccountName() string { + return f.accountName +} + +// New creates a credential policy object. +func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + // Add a x-ms-date header if it doesn't already exist + if d := request.Header.Get(headerXmsDate); d == "" { + request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)} + } + stringToSign := f.buildStringToSign(request) + signature := f.ComputeHMACSHA256(stringToSign) + authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "") + request.Header[headerAuthorization] = []string{authHeader} + + response, err := next.Do(ctx, request) + if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden { + // Service failed to authenticate request, log it + po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") + } + return response, err + }) +} + +// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. +func (*SharedKeyCredential) credentialMarker() {} + +// Constants ensuring that header names are correctly spelled and consistently cased. 
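+// They are referenced below when building the SharedKey string-to-sign.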
+const ( + headerAuthorization = "Authorization" + headerCacheControl = "Cache-Control" + headerContentEncoding = "Content-Encoding" + headerContentDisposition = "Content-Disposition" + headerContentLanguage = "Content-Language" + headerContentLength = "Content-Length" + headerContentMD5 = "Content-MD5" + headerContentType = "Content-Type" + headerDate = "Date" + headerIfMatch = "If-Match" + headerIfModifiedSince = "If-Modified-Since" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" + headerUserAgent = "User-Agent" + headerXmsDate = "x-ms-date" + headerXmsVersion = "x-ms-version" +) + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (f *SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) { + h := hmac.New(sha256.New, f.accountKey) + h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) string { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := request.Header + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + + stringToSign := strings.Join([]string{ + request.Method, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + "", // Empty date because x-ms-date is expected (as per web page above) + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + f.buildCanonicalizedResource(request.URL), + }, "\n") + return stringToSign +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return string(ch.Bytes()) +} + +func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(f.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + panic(err) + } + + if len(params) > 0 { // There is at least 1 query parameter + paramNames := []string{} // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + } + } + return string(cr.Bytes()) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_credential_token.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_credential_token.go new file mode 100644 index 000000000000..d7f925d39818 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_credential_token.go @@ -0,0 +1,126 @@ +package azblob + +import ( + "context" + "sync/atomic" + + "runtime" + "sync" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// TokenCredential represents a token credential (which is also a pipeline.Factory). +type TokenCredential interface { + Credential + Token() string + SetToken(newToken string) +} + +// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage +// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for +// tokenRefresher, then the function you pass will be called immediately (so it can refresh and change the +// TokenCredential's token value by calling SetToken; your tokenRefresher function must return a time.Duration +// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again. +func NewTokenCredential(initialToken string, tokenRefresher func(credential TokenCredential) time.Duration) TokenCredential { + tc := &tokenCredential{} + tc.SetToken(initialToken) // We dont' set it above to guarantee atomicity + if tokenRefresher == nil { + return tc // If no callback specified, return the simple tokenCredential + } + + tcwr := &tokenCredentialWithRefresh{token: tc} + tcwr.token.startRefresh(tokenRefresher) + runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) { + deadTC.token.stopRefresh() + deadTC.token = nil // Sanity (not really required) + }) + return tcwr +} + +// tokenCredentialWithRefresh is a wrapper over a token credential. +// When this wrapper object gets GC'd, it stops the tokenCredential's timer +// which allows the tokenCredential object to also be GC'd. +type tokenCredentialWithRefresh struct { + token *tokenCredential +} + +// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. 
+func (*tokenCredentialWithRefresh) credentialMarker() {} + +// Token returns the current token value +func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() } + +// SetToken changes the current token value +func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) } + +// New satisfies pipeline.Factory's New method creating a pipeline policy object. +func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return f.token.New(next, po) +} + +/////////////////////////////////////////////////////////////////////////////// + +// tokenCredential is a pipeline.Factory is the credential's policy factory. +type tokenCredential struct { + token atomic.Value + + // The members below are only used if the user specified a tokenRefresher callback function. + timer *time.Timer + tokenRefresher func(c TokenCredential) time.Duration + lock sync.Mutex + stopped bool +} + +// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. +func (*tokenCredential) credentialMarker() {} + +// Token returns the current token value +func (f *tokenCredential) Token() string { return f.token.Load().(string) } + +// SetToken changes the current token value +func (f *tokenCredential) SetToken(token string) { f.token.Store(token) } + +// startRefresh calls refresh which immediately calls tokenRefresher +// and then starts a timer to call tokenRefresher in the future. +func (f *tokenCredential) startRefresh(tokenRefresher func(c TokenCredential) time.Duration) { + f.tokenRefresher = tokenRefresher + f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again + f.refresh() +} + +// refresh calls the user's tokenRefresher so they can refresh the token (by +// calling SetToken) and then starts another time (based on the returned duration) +// in order to refresh the token again in the future. +func (f *tokenCredential) refresh() { + d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock + f.lock.Lock() + if !f.stopped { + f.timer = time.AfterFunc(d, f.refresh) + } + f.lock.Unlock() +} + +// stopRefresh stops any pending timer and sets stopped field to true to prevent +// any new timer from starting. +// NOTE: Stopping the timer allows the GC to destroy the tokenCredential object. +func (f *tokenCredential) stopRefresh() { + f.lock.Lock() + f.stopped = true + if f.timer != nil { + f.timer.Stop() + } + f.lock.Unlock() +} + +// New satisfies pipeline.Factory's New method creating a pipeline policy object. 
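+// The returned policy adds an "Authorization: Bearer <token>" header to each request and panics if the request does not use HTTPS.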
+func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + if request.URL.Scheme != "https" { + panic("Token credentials require a URL using the https protocol scheme.") + } + request.Header[headerAuthorization] = []string{"Bearer " + f.Token()} + return next.Do(ctx, request) + }) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_mmf_unix.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_mmf_unix.go new file mode 100644 index 000000000000..b6c668ac6480 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_mmf_unix.go @@ -0,0 +1,27 @@ +// +build linux darwin freebsd + +package azblob + +import ( + "os" + "syscall" +) + +type mmf []byte + +func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { + prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only + if writable { + prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED + } + addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags) + return mmf(addr), err +} + +func (m *mmf) unmap() { + err := syscall.Munmap(*m) + *m = nil + if err != nil { + panic(err) + } +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_mmf_windows.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_mmf_windows.go new file mode 100644 index 000000000000..1a6e83dad9b2 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_mmf_windows.go @@ -0,0 +1,38 @@ +package azblob + +import ( + "os" + "reflect" + "syscall" + "unsafe" +) + +type mmf []byte + +func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { + prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only + if writable { + prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) + } + hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil) + if hMMF == 0 { + return nil, os.NewSyscallError("CreateFileMapping", errno) + } + defer syscall.CloseHandle(hMMF) + addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) + m := mmf{} + h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) + h.Data = addr + h.Len = length + h.Cap = h.Len + return m, nil +} + +func (m *mmf) unmap() { + addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) + *m = mmf{} + err := syscall.UnmapViewOfFile(addr) + if err != nil { + panic(err) + } +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_pipeline.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_pipeline.go new file mode 100644 index 000000000000..af5fcd6c7f88 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_pipeline.go @@ -0,0 +1,46 @@ +package azblob + +import ( + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// PipelineOptions is used to configure a request policy pipeline's retry policy and logging. +type PipelineOptions struct { + // Log configures the pipeline's logging infrastructure indicating what information is logged and where. 
+ Log pipeline.LogOptions + + // Retry configures the built-in retry policy behavior. + Retry RetryOptions + + // RequestLog configures the built-in request logging policy. + RequestLog RequestLogOptions + + // Telemetry configures the built-in telemetry policy behavior. + Telemetry TelemetryOptions +} + +// NewPipeline creates a Pipeline using the specified credentials and options. +func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline { + if c == nil { + panic("c can't be nil") + } + + // Closest to API goes first; closest to the wire goes last + f := []pipeline.Factory{ + NewTelemetryPolicyFactory(o.Telemetry), + NewUniqueRequestIDPolicyFactory(), + NewRetryPolicyFactory(o.Retry), + } + + if _, ok := c.(*anonymousCredentialPolicyFactory); !ok { + // For AnonymousCredential, we optimize out the policy factory since it doesn't do anything + // NOTE: The credential's policy factory must appear close to the wire so it can sign any + // changes made by other factories (like UniqueRequestIDPolicyFactory) + f = append(f, c) + } + f = append(f, + pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked + NewRequestLogPolicyFactory(o.RequestLog)) + + return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: nil, Log: o.Log}) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_request_log.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_request_log.go new file mode 100644 index 000000000000..23d559eb7c43 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_request_log.go @@ -0,0 +1,150 @@ +package azblob + +import ( + "bytes" + "context" + "fmt" + "net/http" + "net/url" + "runtime" + "strings" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// RequestLogOptions configures the retry policy's behavior. +type RequestLogOptions struct { + // LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified + // duration (-1=no logging; 0=default threshold). + LogWarningIfTryOverThreshold time.Duration +} + +func (o RequestLogOptions) defaults() RequestLogOptions { + if o.LogWarningIfTryOverThreshold == 0 { + // It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/ + // But this monitors the time to get the HTTP response; NOT the time to download the response body. + o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds + } + return o +} + +// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options. 
+func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory { + o = o.defaults() // Force defaults to be calculated + return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { + // These variables are per-policy; shared by multiple calls to Do + var try int32 + operationStart := time.Now() // If this is the 1st try, record the operation state time + return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { + try++ // The first try is #1 (not #0) + + // Log the outgoing request as informational + if po.ShouldLog(pipeline.LogInfo) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try) + pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil) + po.Log(pipeline.LogInfo, b.String()) + } + + // Set the time for this particular retry operation and then Do the operation. + tryStart := time.Now() + response, err = next.Do(ctx, request) // Make the request + tryEnd := time.Now() + tryDuration := tryEnd.Sub(tryStart) + opDuration := tryEnd.Sub(operationStart) + + logLevel, forceLog := pipeline.LogInfo, false // Default logging information + + // If the response took too long, we'll upgrade to warning. + if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { + // Log a warning if the try duration exceeded the specified threshold + logLevel, forceLog = pipeline.LogWarning, true + } + + if err == nil { // We got a response from the service + sc := response.Response().StatusCode + if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) { + logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those listed is an error) or any 5xx + } else { + // For other status codes, we leave the level as is. 
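+ // (Success responses, redirects, and the benign 4xx codes excluded above stay at Info, or Warning if the slow-try threshold was exceeded.)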
+ } + } else { // This error did not get an HTTP response from the service; upgrade the severity to Error + logLevel, forceLog = pipeline.LogError, true + } + + if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + slow := "" + if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { + slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold) + } + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + if logLevel == pipeline.LogError { + fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n") + } + } + + pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err) + if logLevel <= pipeline.LogError { + b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation) + } + msg := b.String() + + if forceLog { + pipeline.ForceLog(logLevel, msg) + } + if shouldLog { + po.Log(logLevel, msg) + } + } + return response, err + } + }) +} + +func redactSigQueryParam(rawQuery string) (bool, string) { + rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig= + sigFound := strings.Contains(rawQuery, "?sig=") + if !sigFound { + sigFound = strings.Contains(rawQuery, "&sig=") + if !sigFound { + return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation) + } + } + // [?|&]sig= found, redact its value + values, _ := url.ParseQuery(rawQuery) + for name := range values { + if strings.EqualFold(name, "sig") { + values[name] = []string{"REDACTED"} + } + } + return sigFound, values.Encode() +} + +func prepareRequestForLogging(request pipeline.Request) *http.Request { + req := request + if sigFound, rawQuery := redactSigQueryParam(req.URL.RawQuery); sigFound { + // Make copy so we don't destroy the query parameters we actually need to send in the request + req = request.Copy() + req.Request.URL.RawQuery = rawQuery + } + return req.Request +} + +func stack() []byte { + buf := make([]byte, 1024) + for { + n := runtime.Stack(buf, false) + if n < len(buf) { + return buf[:n] + } + buf = make([]byte, 2*len(buf)) + } +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_retry.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_retry.go new file mode 100644 index 000000000000..4c885ea1aa35 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_retry.go @@ -0,0 +1,318 @@ +package azblob + +import ( + "context" + "math/rand" + "net" + "net/http" + "strconv" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" + "io/ioutil" + "io" +) + +// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants. +type RetryPolicy int32 + +const ( + // RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy + RetryPolicyExponential RetryPolicy = 0 + + // RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy + RetryPolicyFixed RetryPolicy = 1 +) + +// RetryOptions configures the retry policy's behavior. +type RetryOptions struct { + // Policy tells the pipeline what kind of retry policy to use. 
See the RetryPolicy* constants.\ + // A value of zero means that you accept our default policy. + Policy RetryPolicy + + // MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default). + // A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries. + MaxTries int32 + + // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. + // A value of zero means that you accept our default timeout. NOTE: When transferring large amounts + // of data, the default TryTimeout will probably not be sufficient. You should override this value + // based on the bandwidth available to the host machine and proximity to the Storage service. A good + // starting point may be something like (60 seconds per MB of anticipated-payload-size). + TryTimeout time.Duration + + // RetryDelay specifies the amount of delay to use before retrying an operation (0=default). + // When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially + // with each retry up to a maximum specified by MaxRetryDelay. + // If you specify 0, then you must also specify 0 for MaxRetryDelay. + // If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be + // equal to or greater than RetryDelay. + RetryDelay time.Duration + + // MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default). + // If you specify 0, then you must also specify 0 for RetryDelay. + MaxRetryDelay time.Duration + + // RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host. + // If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host. 
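+	// For an RA-GRS account this is typically the secondary endpoint's host, e.g. "<account>-secondary.blob.core.windows.net".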
+ // NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent + // data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs + RetryReadsFromSecondaryHost string // Comment this our for non-Blob SDKs +} + +func (o RetryOptions) retryReadsFromSecondaryHost() string { + return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only + //return "" // This is for non-blob SDKs +} + +func (o RetryOptions) defaults() RetryOptions { + if o.Policy != RetryPolicyExponential && o.Policy != RetryPolicyFixed { + panic("RetryPolicy must be RetryPolicyExponential or RetryPolicyFixed") + } + if o.MaxTries < 0 { + panic("MaxTries must be >= 0") + } + if o.TryTimeout < 0 || o.RetryDelay < 0 || o.MaxRetryDelay < 0 { + panic("TryTimeout, RetryDelay, and MaxRetryDelay must all be >= 0") + } + if o.RetryDelay > o.MaxRetryDelay { + panic("RetryDelay must be <= MaxRetryDelay") + } + if (o.RetryDelay == 0 && o.MaxRetryDelay != 0) || (o.RetryDelay != 0 && o.MaxRetryDelay == 0) { + panic("Both RetryDelay and MaxRetryDelay must be 0 or neither can be 0") + } + + IfDefault := func(current *time.Duration, desired time.Duration) { + if *current == time.Duration(0) { + *current = desired + } + } + + // Set defaults if unspecified + if o.MaxTries == 0 { + o.MaxTries = 4 + } + switch o.Policy { + case RetryPolicyExponential: + IfDefault(&o.TryTimeout, 1*time.Minute) + IfDefault(&o.RetryDelay, 4*time.Second) + IfDefault(&o.MaxRetryDelay, 120*time.Second) + + case RetryPolicyFixed: + IfDefault(&o.TryTimeout, 1*time.Minute) + IfDefault(&o.RetryDelay, 30*time.Second) + IfDefault(&o.MaxRetryDelay, 120*time.Second) + } + return o +} + +func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0 + pow := func(number int64, exponent int32) int64 { // pow is nested helper function + var result int64 = 1 + for n := int32(0); n < exponent; n++ { + result *= number + } + return result + } + + delay := time.Duration(0) + switch o.Policy { + case RetryPolicyExponential: + delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay + + case RetryPolicyFixed: + if try > 1 { // Any try after the 1st uses the fixed delay + delay = o.RetryDelay + } + } + + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand + if delay > o.MaxRetryDelay { + delay = o.MaxRetryDelay + } + return delay +} + +// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options. +func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { + o = o.defaults() // Force defaults to be calculated + return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { + return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { + // Before each try, we'll select either the primary or secondary URL. 
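+ // primaryTry counts only the attempts sent to the primary endpoint, so secondary tries don't inflate the exponential back-off.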
+ primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC + + // We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use + considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != "" + + // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) + // When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable + // If using a secondary: + // Even tries go against primary; odd tries go against the secondary + // For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2) + // If secondary gets a 404, don't fail, retry but future retries are only against the primary + // When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2)) + for try := int32(1); try <= o.MaxTries; try++ { + logf("\n=====> Try=%d\n", try) + + // Determine which endpoint to try. It's primary if there is no secondary or if it is an add # attempt. + tryingPrimary := !considerSecondary || (try%2 == 1) + // Select the correct host and delay + if tryingPrimary { + primaryTry++ + delay := o.calcDelay(primaryTry) + logf("Primary try=%d, Delay=%v\n", primaryTry, delay) + time.Sleep(delay) // The 1st try returns 0 delay + } else { + delay := time.Second * time.Duration(rand.Float32()/2+0.8) + logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay) + time.Sleep(delay) // Delay with some jitter before trying secondary + } + + // Clone the original request to ensure that each try starts with the original (unmutated) request. + requestCopy := request.Copy() + + // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because + // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // 1st try as for additional tries. + if err = requestCopy.RewindBody(); err != nil { + panic(err) + } + if !tryingPrimary { + requestCopy.Request.URL.Host = o.retryReadsFromSecondaryHost() + } + + // Set the server-side timeout query parameter "timeout=[seconds]" + timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try + if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two + t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline + logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t) + if t < timeout { + timeout = t + } + if timeout < 0 { + timeout = 0 // If timeout ever goes negative, set it to zero; this happen while debugging + } + logf("TryTimeout adjusted to=%d sec\n", timeout) + } + q := requestCopy.Request.URL.Query() + q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up" + requestCopy.Request.URL.RawQuery = q.Encode() + logf("Url=%s\n", requestCopy.Request.URL.String()) + + // Set the time for this particular retry operation and then Do the operation. 
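+ // The per-try Context enforces TryTimeout for this attempt without cancelling the caller's ctx.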
+ tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout)) + //requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body} + response, err = next.Do(tryCtx, requestCopy) // Make the request + /*err = improveDeadlineExceeded(err) + if err == nil { + response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body} + }*/ + logf("Err=%v, response=%v\n", err, response) + + action := "" // This MUST get changed within the switch code below + switch { + case ctx.Err() != nil: + action = "NoRetry: Op timeout" + case !tryingPrimary && response != nil && response.Response().StatusCode == http.StatusNotFound: + // If attempt was against the secondary & it returned a StatusNotFound (404), then + // the resource was not found. This may be due to replication delay. So, in this + // case, we'll never try the secondary again for this operation. + considerSecondary = false + action = "Retry: Secondary URL returned 404" + case err != nil: + // NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation + if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) { + action = "Retry: net.Error and Temporary() or Timeout()" + } else { + action = "NoRetry: unrecognized error" + } + default: + action = "NoRetry: successful HTTP request" // no error + } + + logf("Action=%s\n", action) + // fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying + if action[0] != 'R' { // Retry only if action starts with 'R' + if err != nil { + tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context + } else { + // TODO: Right now, we've decided to leak the per-try Context until the user's Context is canceled. + // Another option is that we wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper. + // So, when the user closes the Body, the our per-try context gets closed too. 
+ // Another option, is that the Last Policy do this wrapping for a per-retry context (not for the user's context) + _ = tryCancel // So, for now, we don't call cancel: cancel() + } + break // Don't retry + } + if response != nil && response.Response() != nil && response.Response().Body != nil { + // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection + body := response.Response().Body + io.Copy(ioutil.Discard, body) + body.Close() + } + // If retrying, cancel the current per-try timeout context + tryCancel() + } + return response, err // Not retryable or too many retries; return the last response/error + } + }) +} + +// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away +var logf = func(format string, a ...interface{}) {} + +// Use this version to see the retry method's code path (import "fmt") +//var logf = fmt.Printf + +/* +type deadlineExceededReadCloser struct { + r io.ReadCloser +} + +func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) { + n, err := 0, io.EOF + if r.r != nil { + n, err = r.r.Read(p) + } + return n, improveDeadlineExceeded(err) +} +func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) { + // For an HTTP request, the ReadCloser MUST also implement seek + // For an HTTP response, Seek MUST not be called (or this will panic) + o, err := r.r.(io.Seeker).Seek(offset, whence) + return o, improveDeadlineExceeded(err) +} +func (r *deadlineExceededReadCloser) Close() error { + if c, ok := r.r.(io.Closer); ok { + c.Close() + } + return nil +} + +// timeoutError is the internal struct that implements our richer timeout error. +type deadlineExceeded struct { + responseError +} + +var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time + +// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error. +func improveDeadlineExceeded(cause error) error { + // If cause is not DeadlineExceeded, return the same error passed in. + if cause != context.DeadlineExceeded { + return cause + } + // Else, convert DeadlineExceeded to our timeoutError which gives a richer string message + return &deadlineExceeded{ + responseError: responseError{ + ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), + }, + } +} + +// Error implements the error interface's Error method to return a string representation of the error. +func (e *deadlineExceeded) Error() string { + return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field") +} +*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_telemetry.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_telemetry.go new file mode 100644 index 000000000000..608e1051ca0a --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_telemetry.go @@ -0,0 +1,51 @@ +package azblob + +import ( + "bytes" + "context" + "fmt" + "os" + "runtime" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// TelemetryOptions configures the telemetry policy's behavior. +type TelemetryOptions struct { + // Value is a string prepended to each request's User-Agent and sent to the service. 
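+	// For example, a (hypothetical) Value of "myApp/1.0" yields a User-Agent such as "myApp/1.0 Azure-Storage/0.1 (<go version>; <OS>)".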
+ // The service records the user-agent in logs for diagnostics and tracking of client requests. + Value string +} + +// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects +// which add telemetry information to outgoing HTTP requests. +func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory { + b := &bytes.Buffer{} + b.WriteString(o.Value) + if b.Len() > 0 { + b.WriteRune(' ') + } + fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo) + telemetryValue := b.String() + + return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { + return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + request.Header.Set("User-Agent", telemetryValue) + return next.Do(ctx, request) + } + }) +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + // Azure-Storage/version (runtime; os type and version)” + // Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)' + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_unique_request_id.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_unique_request_id.go new file mode 100644 index 000000000000..a75c7d1d2e18 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_unique_request_id.go @@ -0,0 +1,24 @@ +package azblob + +import ( + "context" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object +// that sets the request's x-ms-client-request-id header if it doesn't already exist. +func NewUniqueRequestIDPolicyFactory() pipeline.Factory { + return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { + // This is Policy's Do method: + return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + id := request.Header.Get(xMsClientRequestID) + if id == "" { // Add a unique request ID if the caller didn't specify one already + request.Header.Set(xMsClientRequestID, newUUID().String()) + } + return next.Do(ctx, request) + } + }) +} + +const xMsClientRequestID = "x-ms-client-request-id" diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_retry_reader.go new file mode 100644 index 000000000000..42724efa52c3 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_retry_reader.go @@ -0,0 +1,122 @@ +package azblob + +import ( + "context" + "io" + "net" + "net/http" +) + +const CountToEnd = 0 + +// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. +type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) + +// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters +// that should be used to make an HTTP GET request. 
+type HTTPGetterInfo struct { + // Offset specifies the start offset that should be used when + // creating the HTTP GET request's Range header + Offset int64 + + // Count specifies the count of bytes that should be used to calculate + // the end offset when creating the HTTP GET request's Range header + Count int64 + + // ETag specifies the resource's etag that should be used when creating + // the HTTP GET request's If-Match header + ETag ETag +} + +// RetryReaderOptions contains properties which can help to decide when to do retry. +type RetryReaderOptions struct { + // MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made + // while reading from a RetryReader. A value of zero means that no additional HTTP + // GET requests will be made. + MaxRetryRequests int + doInjectError bool + doInjectErrorRound int +} + +// retryReader implements io.ReaderCloser methods. +// retryReader tries to read from response, and if there is retriable network error +// returned during reading, it will retry according to retry reader option through executing +// user defined action with provided data to get a new response, and continue the overall reading process +// through reading from the new response. +type retryReader struct { + ctx context.Context + response *http.Response + info HTTPGetterInfo + countWasBounded bool + o RetryReaderOptions + getter HTTPGetter +} + +// NewRetryReader creates a retry reader. +func NewRetryReader(ctx context.Context, initialResponse *http.Response, + info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser { + if getter == nil { + panic("getter must not be nil") + } + if info.Count < 0 { + panic("info.Count must be >= 0") + } + if o.MaxRetryRequests < 0 { + panic("o.MaxRetryRequests must be >= 0") + } + return &retryReader{ctx: ctx, getter: getter, info: info, countWasBounded: info.Count != CountToEnd, response: initialResponse, o: o} +} + +func (s *retryReader) Read(p []byte) (n int, err error) { + for try := 0; ; try++ { + //fmt.Println(try) // Comment out for debugging. + if s.countWasBounded && s.info.Count == CountToEnd { + // User specified an original count and the remaining bytes are 0, return 0, EOF + return 0, io.EOF + } + + if s.response == nil { // We don't have a response stream to read from, try to get one. + response, err := s.getter(s.ctx, s.info) + if err != nil { + return 0, err + } + // Successful GET; this is the network stream we'll read from. + s.response = response + } + n, err := s.response.Body.Read(p) // Read from the stream + + // Injection mechanism for testing. + if s.o.doInjectError && try == s.o.doInjectErrorRound { + err = &net.DNSError{IsTemporary: true} + } + + // We successfully read data or end EOF. + if err == nil || err == io.EOF { + s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future + if s.info.Count != CountToEnd { + s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future + } + return n, err // Return the return to the caller + } + s.Close() // Error, close stream + s.response = nil // Our stream is no longer good + + // Check the retry count and error code, and decide whether to retry. + if try >= s.o.MaxRetryRequests { + return n, err // All retries exhausted + } + + if netErr, ok := err.(net.Error); ok && (netErr.Timeout() || netErr.Temporary()) { + continue + // Loop around and try to get and read from new stream. 
+ } + return n, err // Not retryable, just return + } +} + +func (s *retryReader) Close() error { + if s.response != nil && s.response.Body != nil { + return s.response.Body.Close() + } + return nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_sas_account.go new file mode 100644 index 000000000000..8b51193f6a07 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_sas_account.go @@ -0,0 +1,217 @@ +package azblob + +import ( + "bytes" + "fmt" + "strings" + "time" +) + +// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas +type AccountSASSignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to SASVersion + Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String() + IPRange IPRange `param:"sip"` + Services string `param:"ss"` // Create by initializing AccountSASServices and then call String() + ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String() +} + +// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce +// the proper SAS query parameters. +func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters { + // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { + panic("Account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + } + if v.Version == "" { + v.Version = SASVersion + } + perms := &AccountSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + panic(err) + } + v.Permissions = perms.String() + + startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime) + + stringToSign := strings.Join([]string{ + sharedKeyCredential.AccountName(), + v.Permissions, + v.Services, + v.ResourceTypes, + startTime, + expiryTime, + v.IPRange.String(), + string(v.Protocol), + v.Version, + ""}, // That right, the account SAS requires a terminating extra newline + "\n") + + signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign) + p := SASQueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Account-specific SAS parameters + services: v.Services, + resourceTypes: v.ResourceTypes, + + // Calculated SAS signature + signature: signature, + } + return p +} + +// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. +type AccountSASPermissions struct { + Read, Write, Delete, List, Add, Create, Update, Process bool +} + +// String produces the SAS permissions string for an Azure Storage account. 
+// Call this method to set AccountSASSignatureValues's Permissions field. +func (p AccountSASPermissions) String() string { + var buffer bytes.Buffer + if p.Read { + buffer.WriteRune('r') + } + if p.Write { + buffer.WriteRune('w') + } + if p.Delete { + buffer.WriteRune('d') + } + if p.List { + buffer.WriteRune('l') + } + if p.Add { + buffer.WriteRune('a') + } + if p.Create { + buffer.WriteRune('c') + } + if p.Update { + buffer.WriteRune('u') + } + if p.Process { + buffer.WriteRune('p') + } + return buffer.String() +} + +// Parse initializes the AccountSASPermissions's fields from a string. +func (p *AccountSASPermissions) Parse(s string) error { + *p = AccountSASPermissions{} // Clear out the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'u': + p.Update = true + case 'p': + p.Process = true + default: + return fmt.Errorf("Invalid permission character: '%v'", r) + } + } + return nil +} + +// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. +type AccountSASServices struct { + Blob, Queue, File bool +} + +// String produces the SAS services string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's Services field. +func (s AccountSASServices) String() string { + var buffer bytes.Buffer + if s.Blob { + buffer.WriteRune('b') + } + if s.Queue { + buffer.WriteRune('q') + } + if s.File { + buffer.WriteRune('f') + } + return buffer.String() +} + +// Parse initializes the AccountSASServices' fields from a string. +func (a *AccountSASServices) Parse(s string) error { + *a = AccountSASServices{} // Clear out the flags + for _, r := range s { + switch r { + case 'b': + a.Blob = true + case 'q': + a.Queue = true + case 'f': + a.File = true + default: + return fmt.Errorf("Invalid service character: '%v'", r) + } + } + return nil +} + +// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. +type AccountSASResourceTypes struct { + Service, Container, Object bool +} + +// String produces the SAS resource types string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's ResourceTypes field. +func (rt AccountSASResourceTypes) String() string { + var buffer bytes.Buffer + if rt.Service { + buffer.WriteRune('s') + } + if rt.Container { + buffer.WriteRune('c') + } + if rt.Object { + buffer.WriteRune('o') + } + return buffer.String() +} + +// Parse initializes the AccountSASResourceType's fields from a string. 
+func (rt *AccountSASResourceTypes) Parse(s string) error { + *rt = AccountSASResourceTypes{} // Clear out the flags + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'q': + rt.Container = true + case 'o': + rt.Object = true + default: + return fmt.Errorf("Invalid resource type: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_sas_query_params.go new file mode 100644 index 000000000000..db10171e7535 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_sas_query_params.go @@ -0,0 +1,211 @@ +package azblob + +import ( + "net" + "net/url" + "strings" + "time" +) + +// SASVersion indicates the SAS version. +const SASVersion = ServiceVersion + +type SASProtocol string + +const ( + // SASProtocolHTTPS can be specified for a SAS protocol + SASProtocolHTTPS SASProtocol = "https" + + // SASProtocolHTTPSandHTTP can be specified for a SAS protocol + SASProtocolHTTPSandHTTP SASProtocol = "https,http" +) + +// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a +// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). +func FormatTimesForSASSigning(startTime, expiryTime time.Time) (string, string) { + ss := "" + if !startTime.IsZero() { + ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ" + } + se := "" + if !expiryTime.IsZero() { + se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ" + } + return ss, se +} + +// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. +const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601 + +// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + +// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters. +// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components +// to a query parameter map by calling AddToValues(). +// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. +// +// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). +type SASQueryParameters struct { + // All members are immutable or values so copies of this struct are goroutine-safe. 
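+	// The param tags record which SAS query parameter each field corresponds to; fields are populated by newSASQueryParameters or by a XxxSASSignatureValues type.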
+ version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol SASProtocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` +} + +func (p *SASQueryParameters) Version() string { + return p.version +} + +func (p *SASQueryParameters) Services() string { + return p.services +} +func (p *SASQueryParameters) ResourceTypes() string { + return p.resourceTypes +} +func (p *SASQueryParameters) Protocol() SASProtocol { + return p.protocol +} +func (p *SASQueryParameters) StartTime() time.Time { + return p.startTime +} +func (p *SASQueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +func (p *SASQueryParameters) IPRange() IPRange { + return p.ipRange +} + +func (p *SASQueryParameters) Identifier() string { + return p.identifier +} + +func (p *SASQueryParameters) Resource() string { + return p.resource +} +func (p *SASQueryParameters) Permissions() string { + return p.permissions +} + +func (p *SASQueryParameters) Signature() string { + return p.signature +} + +// IPRange represents a SAS IP range's start IP and (optionally) end IP. +type IPRange struct { + Start net.IP // Not specified if length = 0 + End net.IP // Not specified if length = 0 +} + +// String returns a string representation of an IPRange. +func (ipr *IPRange) String() string { + if len(ipr.Start) == 0 { + return "" + } + start := ipr.Start.String() + if len(ipr.End) == 0 { + return start + } + return start + "-" + ipr.End.String() +} + +// NewSASQueryParameters creates and initializes a SASQueryParameters object based on the +// query parameter map's passed-in values. If deleteSASParametersFromValues is true, +// all SAS-related query parameters are removed from the passed-in map. If +// deleteSASParametersFromValues is false, the map passed-in map is unaltered. +func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters { + p := SASQueryParameters{} + for k, v := range values { + val := v[0] + isSASKey := true + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = SASProtocol(val) + case "st": + p.startTime, _ = time.Parse(SASTimeFormat, val) + case "se": + p.expiryTime, _ = time.Parse(SASTimeFormat, val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + default: + isSASKey = false // We didn't recognize the query parameter + } + if isSASKey && deleteSASParametersFromValues { + delete(values, k) + } + } + return p +} + +// AddToValues adds the SAS components to the specified query parameters map. 
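+// Encode (below) calls it to produce the final, URL-encoded query string.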
+func (p *SASQueryParameters) addToValues(v url.Values) url.Values { + if p.version != "" { + v.Add("sv", p.version) + } + if p.services != "" { + v.Add("ss", p.services) + } + if p.resourceTypes != "" { + v.Add("srt", p.resourceTypes) + } + if p.protocol != "" { + v.Add("spr", string(p.protocol)) + } + if !p.startTime.IsZero() { + v.Add("st", p.startTime.Format(SASTimeFormat)) + } + if !p.expiryTime.IsZero() { + v.Add("se", p.expiryTime.Format(SASTimeFormat)) + } + if len(p.ipRange.Start) > 0 { + v.Add("sip", p.ipRange.String()) + } + if p.identifier != "" { + v.Add("si", p.identifier) + } + if p.resource != "" { + v.Add("sr", p.resource) + } + if p.permissions != "" { + v.Add("sp", p.permissions) + } + if p.signature != "" { + v.Add("sig", p.signature) + } + return v +} + +// Encode encodes the SAS query parameters into URL encoded form sorted by key. +func (p *SASQueryParameters) Encode() string { + v := url.Values{} + p.addToValues(v) + return v.Encode() +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_service_codes_common.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_service_codes_common.go new file mode 100644 index 000000000000..765beb24150f --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_service_codes_common.go @@ -0,0 +1,131 @@ +package azblob + +// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes + +const ( + // ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code. + ServiceCodeNone ServiceCodeType = "" + + // ServiceCodeAccountAlreadyExists means the specified account already exists. + ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists" + + // ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403). + ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated" + + // ServiceCodeAccountIsDisabled means the specified account is disabled (403). + ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled" + + // ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403). + ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed" + + // ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400). + ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported" + + // ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412). + ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet" + + // ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400). + ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey" + + // ServiceCodeInsufficientAccountPermissions means read operations are currently disabled or Write operations are not allowed or The account being accessed does not have sufficient permissions to execute this operation (403). + ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions" + + // ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500). 
+ ServiceCodeInternalError ServiceCodeType = "InternalError" + + // ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400). + ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo" + + // ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400). + ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue" + + // ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400). + ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb" + + // ServiceCodeInvalidInput means one of the request inputs is not valid (400). + ServiceCodeInvalidInput ServiceCodeType = "InvalidInput" + + // ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400). + ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5" + + // ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400). + ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata" + + // ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400). + ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue" + + // ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416). + ServiceCodeInvalidRange ServiceCodeType = "InvalidRange" + + // ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400). + ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName" + + // ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400). + ServiceCodeInvalidURI ServiceCodeType = "InvalidUri" + + // ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400). + ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument" + + // ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400). + ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue" + + // ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400). + ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch" + + // ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400). + ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge" + + // ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411). + ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader" + + // ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400). + ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter" + + // ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400). + ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader" + + // ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400). 
+ ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode" + + // ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400). + ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported" + + // ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500). + ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut" + + // ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400). + ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput" + + // ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400). + ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue" + + // ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413). + ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge" + + // ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409). + ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch" + + // ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400). + ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse" + + // ServiceCodeResourceAlreadyExists means the specified resource already exists (409). + ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists" + + // ServiceCodeResourceNotFound means the specified resource does not exist (404). + ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound" + + // ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503). + ServiceCodeServerBusy ServiceCodeType = "ServerBusy" + + // ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400). + ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader" + + // ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400). + ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode" + + // ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400). + ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter" + + // ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405). + ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb" +) diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_storage_error.go new file mode 100644 index 000000000000..03178b247abd --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_storage_error.go @@ -0,0 +1,110 @@ +package azblob + +import ( + "bytes" + "encoding/xml" + "fmt" + "net/http" + "sort" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +func init() { + // wire up our custom error handling constructor + responseErrorFactory = newStorageError +} + +// ServiceCodeType is a string identifying a storage service error. 
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2 +type ServiceCodeType string + +// StorageError identifies a responder-generated network or response parsing error. +type StorageError interface { + // ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response(). + ResponseError + + // ServiceCode returns a service error code. Your code can use this to make error recovery decisions. + ServiceCode() ServiceCodeType +} + +// storageError is the internal struct that implements the public StorageError interface. +type storageError struct { + responseError + serviceCode ServiceCodeType + details map[string]string +} + +// newStorageError creates an error object that implements the error interface. +func newStorageError(cause error, response *http.Response, description string) error { + return &storageError{ + responseError: responseError{ + ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), + response: response, + description: description, + }, + } +} + +// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them. +func (e *storageError) ServiceCode() ServiceCodeType { return e.serviceCode } + +// Error implements the error interface's Error method to return a string representation of the error. +func (e *storageError) Error() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode) + fmt.Fprintf(b, "Description=%s, Details: ", e.description) + if len(e.details) == 0 { + b.WriteString("(none)\n") + } else { + b.WriteRune('\n') + keys := make([]string, 0, len(e.details)) + // Alphabetize the details + for k := range e.details { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fmt.Fprintf(b, " %s: %+v\n", k, e.details[k]) + } + } + req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request + pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil) + return e.ErrorNode.Error(b.String()) +} + +// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503). +func (e *storageError) Temporary() bool { + if e.response != nil { + if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) { + return true + } + } + return e.ErrorNode.Temporary() +} + +// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors. 
+func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { + tokName := "" + var t xml.Token + for t, err = d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = tt.Name.Local + break + case xml.CharData: + switch tokName { + case "Code": + e.serviceCode = ServiceCodeType(tt) + case "Message": + e.description = string(tt) + default: + if e.details == nil { + e.details = map[string]string{} + } + e.details[tokName] = string(tt) + } + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_util_validate.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_util_validate.go new file mode 100644 index 000000000000..001a21c69651 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_util_validate.go @@ -0,0 +1,61 @@ +package azblob + +import ( + "errors" + "fmt" + "io" + "strconv" +) + +// httpRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange +// which has an offset but na zero value count indicates from the offset to the resource's end. +type httpRange struct { + offset int64 + count int64 +} + +func (r httpRange) pointers() *string { + if r.offset == 0 && r.count == 0 { // Do common case first for performance + return nil // No specified range + } + if r.offset < 0 { + panic("The range offset must be >= 0") + } + if r.count < 0 { + panic("The range count must be >= 0") + } + endOffset := "" // if count == 0 + if r.count > 0 { + endOffset = strconv.FormatInt((r.offset+r.count)-1, 10) + } + dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset) + return &dataRange +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) int64 { + if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long + return 0 + } + validateSeekableStreamAt0(body) + count, err := body.Seek(0, io.SeekEnd) + if err != nil { + panic("failed to seek stream") + } + body.Seek(0, io.SeekStart) + return count +} + +func validateSeekableStreamAt0(body io.ReadSeeker) { + if body == nil { // nil body's are "logically" seekable to 0 + return + } + if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil { + if err != nil { + panic(err) + } + panic(errors.New("stream must be set to position 0")) + } +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_uuid.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_uuid.go new file mode 100644 index 000000000000..1fc7e89cf29e --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_uuid.go @@ -0,0 +1,80 @@ +package azblob + +import ( + "crypto/rand" + "fmt" + "strconv" +) + +// The UUID reserved variants. +const ( + reservedNCS byte = 0x80 + reservedRFC4122 byte = 0x40 + reservedMicrosoft byte = 0x20 + reservedFuture byte = 0x00 +) + +// A UUID representation compliant with specification in RFC 4122 document. +type uuid [16]byte + +// NewUUID returns a new uuid using RFC 4122 algorithm. +func newUUID() (u uuid) { + u = uuid{} + // Set all bits to randomly (or pseudo-randomly) chosen values. 
+	_, err := rand.Read(u[:])
+	if err != nil {
+		panic("rand.Read failed")
+	}
+	u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
+
+	var version byte = 4
+	u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
+	return
+}
+
+// String returns an unparsed version of the generated UUID sequence.
+func (u uuid) String() string {
+	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
+
+// ParseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f"
+// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
+func parseUUID(uuidStr string) uuid {
+	char := func(hexString string) byte {
+		i, _ := strconv.ParseUint(hexString, 16, 8)
+		return byte(i)
+	}
+	if uuidStr[0] == '{' {
+		uuidStr = uuidStr[1:] // Skip over the '{'
+	}
+	// 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
+	// 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
+	// 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
+	uuidVal := uuid{
+		char(uuidStr[0:2]),
+		char(uuidStr[2:4]),
+		char(uuidStr[4:6]),
+		char(uuidStr[6:8]),
+
+		char(uuidStr[9:11]),
+		char(uuidStr[11:13]),
+
+		char(uuidStr[14:16]),
+		char(uuidStr[16:18]),
+
+		char(uuidStr[19:21]),
+		char(uuidStr[21:23]),
+
+		char(uuidStr[24:26]),
+		char(uuidStr[26:28]),
+		char(uuidStr[28:30]),
+		char(uuidStr[30:32]),
+		char(uuidStr[32:34]),
+		char(uuidStr[34:36]),
+	}
+	return uuidVal
+}
+
+func (u uuid) bytes() []byte {
+	return u[:]
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zt_doc.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zt_doc.go
new file mode 100644
index 000000000000..6b3779c0e980
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zt_doc.go
@@ -0,0 +1,89 @@
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+Package azblob allows you to manipulate Azure Storage containers and blobs objects.
+
+URL Types
+
+The most common types you'll work with are the XxxURL types. The methods of these types make requests
+against the Azure Storage Service.
+
+ - ServiceURL's methods perform operations on a storage account.
+ - ContainerURL's methods perform operations on an account's container.
+ - BlockBlobURL's methods perform operations on a container's block blob.
+ - AppendBlobURL's methods perform operations on a container's append blob.
+ - PageBlobURL's methods perform operations on a container's page blob.
+ - BlobURL's methods perform operations on a container's blob regardless of the blob's type.
+
+Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP
+request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response is processed.
+The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more.
+
+Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass
+an initial pipeline. When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own
+URL but it shares the same pipeline as the parent ServiceURL object.
+
+To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob.
+To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL
+respectively. These three types are all identical except for the methods they expose; each type exposes the methods
+relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL;
+this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL,
+the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You
+can easily switch between blob types (method sets) by calling a ToXxxBlobURL method.
+
+If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL
+object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object
+with the same URL as the original but with the specified pipeline.
+
+Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that
+XxxURL objects share a lot of system resources making them very efficient.
+
+All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures,
+transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an
+example of how to deal with errors.
+
+URL and Shared Access Signature Manipulation
+
+The library includes a BlobURLParts type for deconstructing and reconstructing URLs. And you can use the following types
+for generating and parsing Shared Access Signatures (SAS):
+ - Use the AccountSASSignatureValues type to create a SAS for a storage account.
+ - Use the BlobSASSignatureValues type to create a SAS for a container or blob.
+ - Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters.
+
+To generate a SAS, you must use the SharedKeyCredential type.
+
+Credentials
+
+When creating a request pipeline, you must specify one of this package's credential types.
+ - Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS).
+ - Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this
+   to generate Shared Access Signatures.
+
+HTTP Request Policy Factories
+
+This package defines several request policy factories for use with the pipeline package.
+Most applications will not use these factories directly; instead, the NewPipeline
+function creates these factories, initializes them (via the PipelineOptions type)
+and returns a pipeline object for use by the XxxURL objects.
+
+However, for advanced scenarios, developers can access these policy factories directly
+and even create their own and then construct their own pipeline in order to affect HTTP
+requests and responses performed by the XxxURL objects. For example, developers can
+introduce their own logging, random failures, request recording & playback for fast
+testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The
+possibilities are endless!
+
+Below are the request pipeline policy factory functions that are provided with this
+package:
+ - NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests.
+ - NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures.
+ - NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests.
+ - NewUniqueRequestIDPolicyFactory Adds an x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures.
+
+Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline.
+*/
+package azblob
+
+// TokenCredential Use this to access resources using Role-Based Access Control (RBAC).
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_append_blob.go
new file mode 100644
index 000000000000..8f3dc9934973
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_append_blob.go
@@ -0,0 +1,234 @@
+package azblob
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"encoding/base64"
+	"github.com/Azure/azure-pipeline-go/pipeline"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// appendBlobClient is the client for the AppendBlob methods of the Azblob service.
+type appendBlobClient struct {
+	managementClient
+}
+
+// newAppendBlobClient creates an instance of the appendBlobClient client.
+func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
+	return appendBlobClient{newManagementClient(url, p)}
+}
+
+// AppendBlock the Append Block operation commits a new block of data to the end of an existing append blob. The Append
+// Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is
+// supported only on version 2015-02-21 or later.
+//
+// body is the initial data; body will be closed upon successful return. Callers should ensure closure when receiving an
+// error. contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
+// information, see Setting
+// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's
+// lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
+// the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is
+// already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error
+// (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the
+// Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append
+// position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error
+// (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob
+// if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate
+// only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to
+// operate only on blobs with a matching value.
ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. +func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.appendBlockPreparer(body, contentLength, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobAppendBlockResponse), err +} + +// appendBlockPreparer prepares the AppendBlock request. +func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "appendblock") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if maxSize != nil { + req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) + } + if appendPosition != nil { + req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// appendBlockResponder handles the response to the AppendBlock request. +func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err +} + +// Create the Create Append Blob operation creates a new append blob. 
+// +// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified, +// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the +// blob's content encoding. If specified, this property is stored with the blob and returned with a read request. +// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the +// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this +// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. +// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and +// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the +// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the +// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified +// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, +// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and +// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is +// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. 
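Before the generated Create implementation, it may help to see how callers reach these internal clients at all. The zt_doc.go package documentation earlier in this diff describes the public entry points; below is a minimal sketch of wiring them together, assuming the exported constructors named there (NewAnonymousCredential, NewPipeline, NewServiceURL, NewContainerURL, NewBlockBlobURL) keep the shapes that documentation describes. The account, container and blob names are placeholders, and the import path is simply the vendored location of this package.

package main

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

func main() {
	// Placeholder endpoint for a storage account; an anonymous credential keeps the sketch self-contained.
	u, _ := url.Parse("https://myaccount.blob.core.windows.net")

	// One pipeline is created once and then shared by every XxxURL object derived from the ServiceURL.
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})

	serviceURL := azblob.NewServiceURL(*u, p)                 // account-level operations
	containerURL := serviceURL.NewContainerURL("mycontainer") // same pipeline, container scope
	blobURL := containerURL.NewBlockBlobURL("myblob.txt")     // same pipeline, block-blob scope

	// The XxxURL types stringify to the full blob URL they target.
	fmt.Println(blobURL.String())
}

Every XxxURL value derived this way shares the single pipeline, which is what ultimately drives the generated preparers and responders such as createPreparer below.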
+func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: metadata, + constraints: []constraint{{target: "metadata", name: null, rule: false, + chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobCreateResponse), err +} + +// createPreparer prepares the Create request. +func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if 
requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-blob-type", "AppendBlob") + return req, nil +} + +// createResponder handles the response to the Create request. +func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_blob.go new file mode 100644 index 000000000000..b2f8eac665b7 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_blob.go @@ -0,0 +1,1233 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/base64" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// blobClient is the client for the Blob methods of the Azblob service. +type blobClient struct { + managementClient +} + +// newBlobClient creates an instance of the blobClient client. +func newBlobClient(url url.URL, p pipeline.Pipeline) blobClient { + return blobClient{newManagementClient(url, p)} +} + +// AbortCopyFromURL the Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a +// destination blob with zero length and full metadata. +// +// copyID is the copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. timeout is +// the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) AbortCopyFromURL(ctx context.Context, copyID string, timeout *int32, leaseID *string, requestID *string) (*BlobAbortCopyFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.abortCopyFromURLPreparer(copyID, timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.abortCopyFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobAbortCopyFromURLResponse), err +} + +// abortCopyFromURLPreparer prepares the AbortCopyFromURL request. 
+func (client blobClient) abortCopyFromURLPreparer(copyID string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("copyid", copyID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "copy") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-copy-action", "abort") + return req, nil +} + +// abortCopyFromURLResponder handles the response to the AbortCopyFromURL request. +func (client blobClient) abortCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusNoContent) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobAbortCopyFromURLResponse{rawResponse: resp.Response()}, err +} + +// AcquireLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative +// one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration +// cannot be changed using renew or change. proposedLeaseID is proposed lease ID, in a GUID string format. The Blob +// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor +// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. 
+func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobAcquireLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobAcquireLeaseResponse), err +} + +// acquireLeasePreparer prepares the AcquireLease request. +func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + if duration != nil { + req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) + } + if proposedLeaseID != nil { + req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "acquire") + return req, nil +} + +// acquireLeaseResponder handles the response to the AcquireLease request. +func (client blobClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobAcquireLeaseResponse{rawResponse: resp.Response()}, err +} + +// BreakLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. breakPeriod is for a break operation, proposed duration the lease should +// continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the +// time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available +// before the break period has expired, but the lease may be held for longer than the break period. 
If this header does +// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an +// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has +// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only +// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobBreakLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobBreakLeaseResponse), err +} + +// breakLeasePreparer prepares the BreakLease request. +func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + if breakPeriod != nil { + req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "break") + return req, nil +} + +// breakLeaseResponder handles the response to the BreakLease request. 
+func (client blobClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobBreakLeaseResponse{rawResponse: resp.Response()}, err +} + +// ChangeLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. +// proposedLeaseID is proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the +// proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string +// formats. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. +func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobChangeLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobChangeLeaseResponse), err +} + +// changeLeasePreparer prepares the ChangeLease request. 
+func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "change") + return req, nil +} + +// changeLeaseResponder handles the response to the ChangeLease request. +func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobChangeLeaseResponse{rawResponse: resp.Response()}, err +} + +// CreateSnapshot the Create Snapshot operation creates a read-only snapshot of a blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. leaseID is if specified, the operation only succeeds if the container's lease is active +// and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. 
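The metadata and lease rules spelled out above translate mechanically into request headers, and createSnapshotPreparer below performs exactly that translation. As a standalone illustration of the mapping (standard library only; the header names are the ones set by the generated preparers in this file, the values are made up):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("PUT", "https://myaccount.blob.core.windows.net/mycontainer/myblob.txt?comp=snapshot", nil)

	// User-defined metadata becomes one x-ms-meta-* header per key, as in createSnapshotPreparer.
	metadata := map[string]string{"owner": "geth", "purpose": "backup"}
	for k, v := range metadata {
		req.Header.Set("x-ms-meta-"+k, v)
	}

	// Optional values are only set when supplied; the lease ID below is a placeholder GUID.
	req.Header.Set("x-ms-lease-id", "00000000-0000-0000-0000-000000000000")
	req.Header.Set("x-ms-version", "2018-03-28") // stands in for the package's ServiceVersion constant

	fmt.Println(req.Header)
}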
+func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: metadata, + constraints: []constraint{{target: "metadata", name: null, rule: false, + chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createSnapshotPreparer(timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createSnapshotResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobCreateSnapshotResponse), err +} + +// createSnapshotPreparer prepares the CreateSnapshot request. +func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "snapshot") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// createSnapshotResponder handles the response to the CreateSnapshot request. +func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobCreateSnapshotResponse{rawResponse: resp.Response()}, err +} + +// Delete if the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently +// removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is +// deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob +// or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] +// (Set-Blob-Service-Properties.md). 
After the specified number of days has passed, the blob's data is permanently +// removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it +// is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which +// blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. +// All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 +// (ResourceNotFound). +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. deleteSnapshots is required if the blob has associated snapshots. Specify one +// of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's +// snapshots and not the blob itself ifModifiedSince is specify this header value to operate only on a blob if it has +// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only +// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client blobClient) Delete(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobDeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.deletePreparer(snapshot, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobDeleteResponse), err +} + +// deletePreparer prepares the Delete request. 
+func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("DELETE", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if deleteSnapshots != DeleteSnapshotsOptionNone { + req.Header.Set("x-ms-delete-snapshots", string(deleteSnapshots)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// deleteResponder handles the response to the Delete request. +func (client blobClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobDeleteResponse{rawResponse: resp.Response()}, err +} + +// Download the Download operation reads or downloads a blob from the system, including its metadata and properties. +// You can also call Download to read a snapshot. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. +// rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for +// the range, as long as the range is less than or equal to 4 MB in size. ifModifiedSince is specify this header value +// to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this +// header value to operate only on a blob if it has not been modified since the specified date/time. ifMatches is +// specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to +// operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
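Download maps to a plain GET; the range headers are the interesting part, and the per-range MD5 is only returned when the requested range is at most 4 MB. A hypothetical net/http equivalent with a placeholder URL and range:

func rangedDownloadSketch() (*http.Request, error) {
	req, err := http.NewRequest("GET",
		"https://myaccount.blob.core.windows.net/mycontainer/myblob", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("x-ms-version", "2018-03-28")
	req.Header.Set("x-ms-range", "bytes=0-4194303")      // first 4 MB of the blob
	req.Header.Set("x-ms-range-get-content-md5", "true") // honoured only for ranges of 4 MB or less
	return req, nil
}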
+func (client blobClient) Download(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*downloadResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.downloadPreparer(snapshot, timeout, rangeParameter, leaseID, rangeGetContentMD5, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.downloadResponder}, req) + if err != nil { + return nil, err + } + return resp.(*downloadResponse), err +} + +// downloadPreparer prepares the Download request. +func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if rangeGetContentMD5 != nil { + req.Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*rangeGetContentMD5)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// downloadResponder handles the response to the Download request. +func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusPartialContent) + if resp == nil { + return nil, err + } + return &downloadResponse{rawResponse: resp.Response()}, err +} + +// GetProperties the Get Properties operation returns all user-defined metadata, standard HTTP properties, and system +// properties for the blob. It does not return the content of the blob. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. 
ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only +// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client blobClient) GetProperties(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobGetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(snapshot, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobGetPropertiesResponse), err +} + +// getPropertiesPreparer prepares the GetProperties request. +func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("HEAD", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. +func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobGetPropertiesResponse{rawResponse: resp.Response()}, err +} + +// ReleaseLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. 
timeout +// is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. +func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobReleaseLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobReleaseLeaseResponse), err +} + +// releaseLeasePreparer prepares the ReleaseLease request. +func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "release") + return req, nil +} + +// releaseLeaseResponder handles the response to the ReleaseLease request. 
+func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobReleaseLeaseResponse{rawResponse: resp.Response()}, err +} + +// RenewLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. timeout +// is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. +func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobRenewLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobRenewLeaseResponse), err +} + +// renewLeasePreparer prepares the RenewLease request. +func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "renew") + return req, nil +} + +// renewLeaseResponder handles the response to the RenewLease request. 
+func (client blobClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobRenewLeaseResponse{rawResponse: resp.Response()}, err +} + +// SetHTTPHeaders the Set HTTP Headers operation sets system properties on the blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, +// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's +// content type. If specified, this property is stored with the blob and returned with a read request. blobContentMD5 +// is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual +// blocks were validated when each was uploaded. blobContentEncoding is optional. Sets the blob's content encoding. If +// specified, this property is stored with the blob and returned with a read request. blobContentLanguage is optional. +// Set the blob's content language. If specified, this property is stored with the blob and returned with a read +// request. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this +// ID. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// blobContentDisposition is optional. Sets the blob's Content-Disposition header. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client blobClient) SetHTTPHeaders(ctx context.Context, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentDisposition *string, requestID *string) (*BlobSetHTTPHeadersResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setHTTPHeadersPreparer(timeout, blobCacheControl, blobContentType, blobContentMD5, blobContentEncoding, blobContentLanguage, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobContentDisposition, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setHTTPHeadersResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetHTTPHeadersResponse), err +} + +// setHTTPHeadersPreparer prepares the SetHTTPHeaders request. 
+func (client blobClient) setHTTPHeadersPreparer(timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentDisposition *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setHTTPHeadersResponder handles the response to the SetHTTPHeaders request. +func (client blobClient) setHTTPHeadersResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetHTTPHeadersResponse{rawResponse: resp.Response()}, err +} + +// SetMetadata the Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more +// name-value pairs +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. leaseID is if specified, the operation only succeeds if the +// container's lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. 
ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobSetMetadataResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: metadata, + constraints: []constraint{{target: "metadata", name: null, rule: false, + chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setMetadataPreparer(timeout, metadata, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetMetadataResponse), err +} + +// setMetadataPreparer prepares the SetMetadata request. +func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string]string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "metadata") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setMetadataResponder handles the response to the SetMetadata request. +func (client blobClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetMetadataResponse{rawResponse: resp.Response()}, err +} + +// SetTier the Set Tier operation sets the tier on a blob. 
The operation is allowed on a page blob in a premium storage +// account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier +// determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive +// storage type. This operation does not update the blob's ETag. +// +// tier is indicates the tier to be set on the blob. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeout *int32, requestID *string) (*BlobSetTierResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setTierPreparer(tier, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTierResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetTierResponse), err +} + +// setTierPreparer prepares the SetTier request. +func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "tier") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-access-tier", string(tier)) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setTierResponder handles the response to the SetTier request. +func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetTierResponse{rawResponse: resp.Response()}, err +} + +// StartCopyFromURL the Start Copy From URL operation copies a blob or an internet resource to a new blob. +// +// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that +// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob +// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. 
Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. sourceIfModifiedSince is specify this header value to operate +// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header +// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatches is +// specify an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to +// operate only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. leaseID is if specified, the operation only succeeds if the container's lease is active +// and matches this ID. sourceLeaseID is specify this header to perform the operation only if the lease ID given +// matches the active lease ID of the source blob. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, sourceLeaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: metadata, + constraints: []constraint{{target: "metadata", name: null, rule: false, + chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatches, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, leaseID, sourceLeaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.startCopyFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobStartCopyFromURLResponse), err +} + +// startCopyFromURLPreparer prepares the StartCopyFromURL request. 
+func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, sourceLeaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatches != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatches)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-copy-source", copySource) + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if sourceLeaseID != nil { + req.Header.Set("x-ms-source-lease-id", *sourceLeaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// startCopyFromURLResponder handles the response to the StartCopyFromURL request. +func (client blobClient) startCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobStartCopyFromURLResponse{rawResponse: resp.Response()}, err +} + +// Undelete undelete a blob that was previously soft deleted +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
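Undelete is the simplest of these operations on the wire: a PUT with comp=undelete and the version header, no body and no preconditions, valid only while the account's soft-delete retention window has not expired. A hypothetical equivalent using net/http and a placeholder URL:

func undeleteRequestSketch() (*http.Request, error) {
	req, err := http.NewRequest("PUT",
		"https://myaccount.blob.core.windows.net/mycontainer/myblob?comp=undelete", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("x-ms-version", "2018-03-28") // ServiceVersion; requires soft delete to be enabled on the account
	return req, nil
}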
+func (client blobClient) Undelete(ctx context.Context, timeout *int32, requestID *string) (*BlobUndeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.undeletePreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.undeleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobUndeleteResponse), err +} + +// undeletePreparer prepares the Undelete request. +func (client blobClient) undeletePreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "undelete") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// undeleteResponder handles the response to the Undelete request. +func (client blobClient) undeleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobUndeleteResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_block_blob.go new file mode 100644 index 000000000000..4e75dcea337e --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_block_blob.go @@ -0,0 +1,498 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// blockBlobClient is the client for the BlockBlob methods of the Azblob service. +type blockBlobClient struct { + managementClient +} + +// newBlockBlobClient creates an instance of the blockBlobClient client. +func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient { + return blockBlobClient{newManagementClient(url, p)} +} + +// CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the +// blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior +// Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, +// then committing the new and existing blocks together. You can do this by specifying whether to commit a block from +// the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the +// block, whichever list it may belong to. +// +// timeout is the timeout parameter is expressed in seconds. 
For more information, see Setting +// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, +// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's +// content type. If specified, this property is stored with the blob and returned with a read request. +// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the +// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If +// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An +// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were +// validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the +// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the +// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified +// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, +// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and +// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is +// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. 
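On the wire the block list is a small XML document PUT with comp=blocklist. The element names below follow the public Put Block List REST documentation rather than this file, and the local type is purely illustrative (encoding/xml only):

// blockListBody is an illustrative stand-in for the BlockLookupList payload that
// commitBlockListPreparer marshals; "Latest" commits the most recently uploaded
// version of each listed block.
type blockListBody struct {
	XMLName xml.Name `xml:"BlockList"`
	Latest  []string `xml:"Latest"`
}

func commitBodySketch(ids []string) ([]byte, error) {
	return xml.Marshal(blockListBody{Latest: ids}) // becomes the body of PUT ...?comp=blocklist
}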
+func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: metadata, + constraints: []constraint{{target: "metadata", name: null, rule: false, + chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobCommitBlockListResponse), err +} + +// commitBlockListPreparer prepares the CommitBlockList request. +func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "blocklist") + req.URL.RawQuery = params.Encode() + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + 
req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(blocks) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// commitBlockListResponder handles the response to the CommitBlockList request. +func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobCommitBlockListResponse{rawResponse: resp.Response()}, err +} + +// GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block +// blob +// +// listType is specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists +// together. snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob +// snapshot to retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getBlockListResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockList), err +} + +// getBlockListPreparer prepares the GetBlockList request. 
+func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + params.Set("blocklisttype", string(listType)) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "blocklist") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getBlockListResponder handles the response to the GetBlockList request. +func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &BlockList{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to read response body") + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// StageBlock the Stage Block operation creates a new block to be committed as part of a blob +// +// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or +// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the +// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon +// successful return. Callers should ensure closure when receiving an error.timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
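The equal-length rule for block IDs is easy to trip over; one common, hypothetical way to satisfy it is to base64-encode a zero-padded counter so that every ID encodes to the same width (fmt and encoding/base64 assumed):

// blockIDSketch returns a base64 block ID of constant length for any index,
// satisfying the "same size for each block" requirement described above.
func blockIDSketch(index int) string {
	raw := fmt.Sprintf("%08d", index) // 8 bytes before encoding, well under the 64-byte limit
	return base64.StdEncoding.EncodeToString([]byte(raw))
}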
+func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.stageBlockPreparer(blockID, contentLength, body, timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobStageBlockResponse), err +} + +// stageBlockPreparer prepares the StageBlock request. +func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("blockid", blockID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "block") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// stageBlockResponder handles the response to the StageBlock request. +func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobStageBlockResponse{rawResponse: resp.Response()}, err +} + +// StageBlockFromURL the Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// +// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or +// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the +// same size for each block. contentLength is the length of the request. sourceURL is specifiy an URL to the copy +// source. sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated +// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
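Unlike StageBlock, the URL variant carries no request body; the service pulls the bytes itself from x-ms-copy-source, optionally limited to x-ms-source-range, so the source must be public or carry a SAS. A hypothetical net/http sketch with placeholder URLs (net/url used to escape the block ID):

func stageFromURLSketch(blockID string) (*http.Request, error) {
	dst := "https://myaccount.blob.core.windows.net/mycontainer/myblob?comp=block&blockid=" +
		url.QueryEscape(blockID)
	req, err := http.NewRequest("PUT", dst, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("x-ms-version", "2018-03-28")
	req.Header.Set("x-ms-copy-source", "https://sourceaccount.blob.core.windows.net/src/blob") // public or SAS-authenticated
	req.Header.Set("x-ms-source-range", "bytes=0-1048575") // stage only the first 1 MB of the source
	return req, nil
}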
+func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL *string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobStageBlockFromURLResponse), err +} + +// stageBlockFromURLPreparer prepares the StageBlockFromURL request. +func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL *string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("blockid", blockID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "block") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if sourceURL != nil { + req.Header.Set("x-ms-copy-source", *sourceURL) + } + if sourceRange != nil { + req.Header.Set("x-ms-source-range", *sourceRange) + } + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// stageBlockFromURLResponder handles the response to the StageBlockFromURL request. +func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobStageBlockFromURLResponse{rawResponse: resp.Response()}, err +} + +// Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block +// blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of +// the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a +// block blob, use the Put Block List operation. +// +// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an +// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified, +// this property is stored with the blob and returned with a read request. 
blobContentEncoding is optional. Sets the +// blob's content encoding. If specified, this property is stored with the blob and returned with a read request. +// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the +// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this +// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. +// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and +// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the +// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the +// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified +// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, +// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and +// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is +// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. 
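// Hypothetical sketch of the smallest possible Upload call: every optional
// pointer/map argument is nil, so the service defaults apply and only the blob
// body, its Content-Length and the x-ms-blob-type: BlockBlob header are sent.
// Assumes this file's existing imports; the helper name is illustrative.
func exampleUpload(ctx context.Context, client blockBlobClient, body io.ReadSeeker, length int64) (*BlockBlobUploadResponse, error) {
	return client.Upload(ctx, body, length, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
}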
+func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: metadata, + constraints: []constraint{{target: "metadata", name: null, rule: false, + chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobUploadResponse), err +} + +// uploadPreparer prepares the Upload request. +func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", 
string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-blob-type", "BlockBlob") + return req, nil +} + +// uploadResponder handles the response to the Upload request. +func (client blockBlobClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobUploadResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_client.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_client.go new file mode 100644 index 000000000000..b42a79b1571c --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_client.go @@ -0,0 +1,38 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/azure-pipeline-go/pipeline" + "net/url" +) + +const ( + // ServiceVersion specifies the version of the operations used in this package. + ServiceVersion = "2018-03-28" +) + +// managementClient is the base client for Azblob. +type managementClient struct { + url url.URL + p pipeline.Pipeline +} + +// newManagementClient creates an instance of the managementClient client. +func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient { + return managementClient{ + url: url, + p: p, + } +} + +// URL returns a copy of the URL for this client. +func (mc managementClient) URL() url.URL { + return mc.url +} + +// Pipeline returns the pipeline for this client. +func (mc managementClient) Pipeline() pipeline.Pipeline { + return mc.p +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_container.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_container.go new file mode 100644 index 000000000000..3e744fcbedb9 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_container.go @@ -0,0 +1,1006 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// containerClient is the client for the Container methods of the Azblob service. +type containerClient struct { + managementClient +} + +// newContainerClient creates an instance of the containerClient client. +func newContainerClient(url url.URL, p pipeline.Pipeline) containerClient { + return containerClient{newManagementClient(url, p)} +} + +// AcquireLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. 
duration is specifies the duration of the lease, in seconds, or negative +// one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration +// cannot be changed using renew or change. proposedLeaseID is proposed lease ID, in a GUID string format. The Blob +// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor +// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client containerClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerAcquireLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerAcquireLeaseResponse), err +} + +// acquireLeasePreparer prepares the AcquireLease request. +func (client containerClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if duration != nil { + req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) + } + if proposedLeaseID != nil { + req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "acquire") + return req, nil +} + +// acquireLeaseResponder handles the response to the AcquireLease request. 
+func (client containerClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerAcquireLeaseResponse{rawResponse: resp.Response()}, err +} + +// BreakLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. breakPeriod is for a break operation, proposed duration the lease should +// continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the +// time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available +// before the break period has expired, but the lease may be held for longer than the break period. If this header does +// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an +// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has +// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerBreakLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerBreakLeaseResponse), err +} + +// breakLeasePreparer prepares the BreakLease request. 
+func (client containerClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if breakPeriod != nil { + req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "break") + return req, nil +} + +// breakLeaseResponder handles the response to the BreakLease request. +func (client containerClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerBreakLeaseResponse{rawResponse: resp.Response()}, err +} + +// ChangeLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. +// proposedLeaseID is proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the +// proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string +// formats. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. 
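// Hypothetical sketch of a container lease round trip driven by a
// caller-proposed lease ID (a GUID string), so no fields of the generated
// response types need to be assumed. duration is in seconds (15 to 60, or -1
// for an infinite lease); the helper name is illustrative.
func exampleLeaseRoundTrip(ctx context.Context, client containerClient, proposedID string, duration int32) error {
	if _, err := client.AcquireLease(ctx, nil, &duration, &proposedID, nil, nil, nil); err != nil {
		return err
	}
	if _, err := client.RenewLease(ctx, proposedID, nil, nil, nil, nil); err != nil {
		return err
	}
	_, err := client.ReleaseLease(ctx, proposedID, nil, nil, nil, nil)
	return err
}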
+func (client containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerChangeLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerChangeLeaseResponse), err +} + +// changeLeasePreparer prepares the ChangeLease request. +func (client containerClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "change") + return req, nil +} + +// changeLeaseResponder handles the response to the ChangeLease request. +func (client containerClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerChangeLeaseResponse{rawResponse: resp.Response()}, err +} + +// Create creates a new container under the specified account. If the container with the same name already exists, the +// operation fails +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. 
access is specifies whether data in the container may be +// accessed publicly and the level of access requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (*ContainerCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: metadata, + constraints: []constraint{{target: "metadata", name: null, rule: false, + chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(timeout, metadata, access, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerCreateResponse), err +} + +// createPreparer prepares the Create request. +func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if access != PublicAccessNone { + req.Header.Set("x-ms-blob-public-access", string(access)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// createResponder handles the response to the Create request. +func (client containerClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerCreateResponse{rawResponse: resp.Response()}, err +} + +// Delete operation marks the specified container for deletion. The container and any blobs contained within it are +// later deleted during garbage collection +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+func (client containerClient) Delete(ctx context.Context, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerDeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.deletePreparer(timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerDeleteResponse), err +} + +// deletePreparer prepares the Delete request. +func (client containerClient) deletePreparer(timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("DELETE", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// deleteResponder handles the response to the Delete request. +func (client containerClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerDeleteResponse{rawResponse: resp.Response()}, err +} + +// GetAccessPolicy gets the permissions for the specified container. The permissions indicate whether container data +// may be accessed publicly. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
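// Hypothetical sketch pairing GetAccessPolicy with SetAccessPolicy: the signed
// identifiers are fetched and written back unchanged. Items is the slice that
// setAccessPolicyPreparer (later in this file) marshals; passing
// PublicAccessNone leaves the x-ms-blob-public-access header unset. The helper
// name is illustrative.
func exampleRoundTripACL(ctx context.Context, client containerClient) error {
	ids, err := client.GetAccessPolicy(ctx, nil, nil, nil)
	if err != nil {
		return err
	}
	_, err = client.SetAccessPolicy(ctx, ids.Items, nil, nil, PublicAccessNone, nil, nil, nil)
	return err
}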
+func (client containerClient) GetAccessPolicy(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*SignedIdentifiers, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getAccessPolicyPreparer(timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*SignedIdentifiers), err +} + +// getAccessPolicyPreparer prepares the GetAccessPolicy request. +func (client containerClient) getAccessPolicyPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "acl") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getAccessPolicyResponder handles the response to the GetAccessPolicy request. +func (client containerClient) getAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &SignedIdentifiers{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to read response body") + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetProperties returns all user-defined metadata and system properties for the specified container. The data returned +// does not include the container's list of blobs +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+func (client containerClient) GetProperties(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*ContainerGetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerGetPropertiesResponse), err +} + +// getPropertiesPreparer prepares the GetProperties request. +func (client containerClient) getPropertiesPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. +func (client containerClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerGetPropertiesResponse{rawResponse: resp.Response()}, err +} + +// ListBlobFlatSegment [Update] The List Blobs operation returns a list of the blobs under the specified container +// +// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a +// string value that identifies the portion of the list of containers to be returned with the next listing operation. +// The operation returns the NextMarker value within the response body if the listing operation did not return all +// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the +// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the +// client. maxresults is specifies the maximum number of containers to return. If the request does not specify +// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the +// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the +// remainder of the results. For this reason, it is possible that the service will return fewer results than specified +// by maxresults, or than the default of 5000. include is include this parameter to specify one or more datasets to +// include in the response. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
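// Hypothetical sketch of fetching one page of the flat blob listing. Passing
// nil for prefix, maxresults, include, timeout and requestID uses the service
// defaults; the opaque continuation marker returned in the response body (see
// the Marker type in zz_generated_models.go) is fed back in as marker to
// request the next page. The helper name is illustrative.
func exampleListBlobsPage(ctx context.Context, client containerClient, marker *string) (*ListBlobsFlatSegmentResponse, error) {
	return client.ListBlobFlatSegment(ctx, nil, marker, nil, nil, nil, nil)
}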
+func (client containerClient) ListBlobFlatSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsFlatSegmentResponse, error) { + if err := validate([]validation{ + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.listBlobFlatSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobFlatSegmentResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ListBlobsFlatSegmentResponse), err +} + +// listBlobFlatSegmentPreparer prepares the ListBlobFlatSegment request. +func (client containerClient) listBlobFlatSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != nil && len(include) > 0 { + params.Set("include", joinConst(include, ",")) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listBlobFlatSegmentResponder handles the response to the ListBlobFlatSegment request. +func (client containerClient) listBlobFlatSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListBlobsFlatSegmentResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to read response body") + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ListBlobHierarchySegment [Update] The List Blobs operation returns a list of the blobs under the specified container +// +// delimiter is when the request includes this parameter, the operation returns a BlobPrefix element in the response +// body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the +// delimiter character. The delimiter may be a single character or a string. 
prefix is filters the results to return +// only containers whose name begins with the specified prefix. marker is a string value that identifies the portion of +// the list of containers to be returned with the next listing operation. The operation returns the NextMarker value +// within the response body if the listing operation did not return all containers remaining to be listed with the +// current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request +// the next page of list items. The marker value is opaque to the client. maxresults is specifies the maximum number of +// containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server +// will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will +// return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the +// service will return fewer results than specified by maxresults, or than the default of 5000. include is include this +// parameter to specify one or more datasets to include in the response. timeout is the timeout parameter is expressed +// in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) ListBlobHierarchySegment(ctx context.Context, delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsHierarchySegmentResponse, error) { + if err := validate([]validation{ + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.listBlobHierarchySegmentPreparer(delimiter, prefix, marker, maxresults, include, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobHierarchySegmentResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ListBlobsHierarchySegmentResponse), err +} + +// listBlobHierarchySegmentPreparer prepares the ListBlobHierarchySegment request. 
+func (client containerClient) listBlobHierarchySegmentPreparer(delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + params.Set("delimiter", delimiter) + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != nil && len(include) > 0 { + params.Set("include", joinConst(include, ",")) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listBlobHierarchySegmentResponder handles the response to the ListBlobHierarchySegment request. +func (client containerClient) listBlobHierarchySegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListBlobsHierarchySegmentResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to read response body") + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ReleaseLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. timeout +// is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. 
+func (client containerClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerReleaseLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerReleaseLeaseResponse), err +} + +// releaseLeasePreparer prepares the ReleaseLease request. +func (client containerClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "release") + return req, nil +} + +// releaseLeaseResponder handles the response to the ReleaseLease request. +func (client containerClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerReleaseLeaseResponse{rawResponse: resp.Response()}, err +} + +// RenewLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// +// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. timeout +// is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. 
+func (client containerClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerRenewLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerRenewLeaseResponse), err +} + +// renewLeasePreparer prepares the RenewLease request. +func (client containerClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "renew") + return req, nil +} + +// renewLeaseResponder handles the response to the RenewLease request. +func (client containerClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerRenewLeaseResponse{rawResponse: resp.Response()}, err +} + +// SetAccessPolicy sets the permissions for the specified container. The permissions indicate whether blobs in a +// container may be accessed publicly. +// +// containerACL is the acls for the container timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. access is specifies whether data in the container may be accessed publicly and +// the level of access ifModifiedSince is specify this header value to operate only on a blob if it has been modified +// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has +// not been modified since the specified date/time. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+func (client containerClient) SetAccessPolicy(ctx context.Context, containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerSetAccessPolicyResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setAccessPolicyPreparer(containerACL, timeout, leaseID, access, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerSetAccessPolicyResponse), err +} + +// setAccessPolicyPreparer prepares the SetAccessPolicy request. +func (client containerClient) setAccessPolicyPreparer(containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "acl") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if access != PublicAccessNone { + req.Header.Set("x-ms-blob-public-access", string(access)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(SignedIdentifiers{Items: containerACL}) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setAccessPolicyResponder handles the response to the SetAccessPolicy request. +func (client containerClient) setAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerSetAccessPolicyResponse{rawResponse: resp.Response()}, err +} + +// SetMetadata operation sets one or more user-defined name-value pairs for the specified container. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. metadata is optional. Specifies a user-defined name-value pair associated with +// the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to +// the destination blob. 
If one or more name-value pairs are specified, the destination blob is created with the +// specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) SetMetadata(ctx context.Context, timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (*ContainerSetMetadataResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: metadata, + constraints: []constraint{{target: "metadata", name: null, rule: false, + chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setMetadataPreparer(timeout, leaseID, metadata, ifModifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerSetMetadataResponse), err +} + +// setMetadataPreparer prepares the SetMetadata request. +func (client containerClient) setMetadataPreparer(timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "metadata") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setMetadataResponder handles the response to the SetMetadata request. 
+func (client containerClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerSetMetadataResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_models.go new file mode 100644 index 000000000000..3d8114ae6b5e --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_models.go @@ -0,0 +1,4501 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "encoding/base64" + "encoding/xml" + "io" + "net/http" + "reflect" + "strconv" + "strings" + "time" + "unsafe" +) + +// ETag is an entity tag. +type ETag string + +const ( + // ETagNone represents an empty entity tag. + ETagNone ETag = "" + + // ETagAny matches any entity tag. + ETagAny ETag = "*" +) + +// Metadata contains metadata key/value pairs. +type Metadata map[string]string + +const mdPrefix = "x-ms-meta-" + +const mdPrefixLen = len(mdPrefix) + +// UnmarshalXML implements the xml.Unmarshaler interface for Metadata. +func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + for t, err := d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + break + case xml.CharData: + if *md == nil { + *md = Metadata{} + } + (*md)[tokName] = string(tt) + break + } + } + return nil +} + +// Marker represents an opaque value used in paged responses. +type Marker struct { + val *string +} + +// NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true +// for a just-initialized (zero value) Marker indicating that you should make an initial request to get a result portion from +// the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only +// after the service has returned the final result portion. +func (m Marker) NotDone() bool { + return m.val == nil || *m.val != "" +} + +// UnmarshalXML implements the xml.Unmarshaler interface for Marker. +func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var out string + err := d.DecodeElement(&out, &start) + m.val = &out + return err +} + +// concatenates a slice of const values with the specified separator between each item +func joinConst(s interface{}, sep string) string { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + panic("s wasn't a slice or array") + } + ss := make([]string, 0, v.Len()) + for i := 0; i < v.Len(); i++ { + ss = append(ss, v.Index(i).String()) + } + return strings.Join(ss, sep) +} + +// AccessTierType enumerates the values for access tier type. +type AccessTierType string + +const ( + // AccessTierArchive ... + AccessTierArchive AccessTierType = "Archive" + // AccessTierCool ... + AccessTierCool AccessTierType = "Cool" + // AccessTierHot ... + AccessTierHot AccessTierType = "Hot" + // AccessTierNone represents an empty AccessTierType. + AccessTierNone AccessTierType = "" + // AccessTierP10 ... 
+ AccessTierP10 AccessTierType = "P10" + // AccessTierP20 ... + AccessTierP20 AccessTierType = "P20" + // AccessTierP30 ... + AccessTierP30 AccessTierType = "P30" + // AccessTierP4 ... + AccessTierP4 AccessTierType = "P4" + // AccessTierP40 ... + AccessTierP40 AccessTierType = "P40" + // AccessTierP50 ... + AccessTierP50 AccessTierType = "P50" + // AccessTierP6 ... + AccessTierP6 AccessTierType = "P6" +) + +// PossibleAccessTierTypeValues returns an array of possible values for the AccessTierType const type. +func PossibleAccessTierTypeValues() []AccessTierType { + return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6} +} + +// ArchiveStatusType enumerates the values for archive status type. +type ArchiveStatusType string + +const ( + // ArchiveStatusNone represents an empty ArchiveStatusType. + ArchiveStatusNone ArchiveStatusType = "" + // ArchiveStatusRehydratePendingToCool ... + ArchiveStatusRehydratePendingToCool ArchiveStatusType = "rehydrate-pending-to-cool" + // ArchiveStatusRehydratePendingToHot ... + ArchiveStatusRehydratePendingToHot ArchiveStatusType = "rehydrate-pending-to-hot" +) + +// PossibleArchiveStatusTypeValues returns an array of possible values for the ArchiveStatusType const type. +func PossibleArchiveStatusTypeValues() []ArchiveStatusType { + return []ArchiveStatusType{ArchiveStatusNone, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot} +} + +// BlobType enumerates the values for blob type. +type BlobType string + +const ( + // BlobAppendBlob ... + BlobAppendBlob BlobType = "AppendBlob" + // BlobBlockBlob ... + BlobBlockBlob BlobType = "BlockBlob" + // BlobNone represents an empty BlobType. + BlobNone BlobType = "" + // BlobPageBlob ... + BlobPageBlob BlobType = "PageBlob" +) + +// PossibleBlobTypeValues returns an array of possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return []BlobType{BlobAppendBlob, BlobBlockBlob, BlobNone, BlobPageBlob} +} + +// BlockListType enumerates the values for block list type. +type BlockListType string + +const ( + // BlockListAll ... + BlockListAll BlockListType = "all" + // BlockListCommitted ... + BlockListCommitted BlockListType = "committed" + // BlockListNone represents an empty BlockListType. + BlockListNone BlockListType = "" + // BlockListUncommitted ... + BlockListUncommitted BlockListType = "uncommitted" +) + +// PossibleBlockListTypeValues returns an array of possible values for the BlockListType const type. +func PossibleBlockListTypeValues() []BlockListType { + return []BlockListType{BlockListAll, BlockListCommitted, BlockListNone, BlockListUncommitted} +} + +// CopyStatusType enumerates the values for copy status type. +type CopyStatusType string + +const ( + // CopyStatusAborted ... + CopyStatusAborted CopyStatusType = "aborted" + // CopyStatusFailed ... + CopyStatusFailed CopyStatusType = "failed" + // CopyStatusNone represents an empty CopyStatusType. + CopyStatusNone CopyStatusType = "" + // CopyStatusPending ... + CopyStatusPending CopyStatusType = "pending" + // CopyStatusSuccess ... + CopyStatusSuccess CopyStatusType = "success" +) + +// PossibleCopyStatusTypeValues returns an array of possible values for the CopyStatusType const type. 
+func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{CopyStatusAborted, CopyStatusFailed, CopyStatusNone, CopyStatusPending, CopyStatusSuccess} +} + +// DeleteSnapshotsOptionType enumerates the values for delete snapshots option type. +type DeleteSnapshotsOptionType string + +const ( + // DeleteSnapshotsOptionInclude ... + DeleteSnapshotsOptionInclude DeleteSnapshotsOptionType = "include" + // DeleteSnapshotsOptionNone represents an empty DeleteSnapshotsOptionType. + DeleteSnapshotsOptionNone DeleteSnapshotsOptionType = "" + // DeleteSnapshotsOptionOnly ... + DeleteSnapshotsOptionOnly DeleteSnapshotsOptionType = "only" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns an array of possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return []DeleteSnapshotsOptionType{DeleteSnapshotsOptionInclude, DeleteSnapshotsOptionNone, DeleteSnapshotsOptionOnly} +} + +// GeoReplicationStatusType enumerates the values for geo replication status type. +type GeoReplicationStatusType string + +const ( + // GeoReplicationStatusBootstrap ... + GeoReplicationStatusBootstrap GeoReplicationStatusType = "bootstrap" + // GeoReplicationStatusLive ... + GeoReplicationStatusLive GeoReplicationStatusType = "live" + // GeoReplicationStatusNone represents an empty GeoReplicationStatusType. + GeoReplicationStatusNone GeoReplicationStatusType = "" + // GeoReplicationStatusUnavailable ... + GeoReplicationStatusUnavailable GeoReplicationStatusType = "unavailable" +) + +// PossibleGeoReplicationStatusTypeValues returns an array of possible values for the GeoReplicationStatusType const type. +func PossibleGeoReplicationStatusTypeValues() []GeoReplicationStatusType { + return []GeoReplicationStatusType{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusNone, GeoReplicationStatusUnavailable} +} + +// LeaseDurationType enumerates the values for lease duration type. +type LeaseDurationType string + +const ( + // LeaseDurationFixed ... + LeaseDurationFixed LeaseDurationType = "fixed" + // LeaseDurationInfinite ... + LeaseDurationInfinite LeaseDurationType = "infinite" + // LeaseDurationNone represents an empty LeaseDurationType. + LeaseDurationNone LeaseDurationType = "" +) + +// PossibleLeaseDurationTypeValues returns an array of possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return []LeaseDurationType{LeaseDurationFixed, LeaseDurationInfinite, LeaseDurationNone} +} + +// LeaseStateType enumerates the values for lease state type. +type LeaseStateType string + +const ( + // LeaseStateAvailable ... + LeaseStateAvailable LeaseStateType = "available" + // LeaseStateBreaking ... + LeaseStateBreaking LeaseStateType = "breaking" + // LeaseStateBroken ... + LeaseStateBroken LeaseStateType = "broken" + // LeaseStateExpired ... + LeaseStateExpired LeaseStateType = "expired" + // LeaseStateLeased ... + LeaseStateLeased LeaseStateType = "leased" + // LeaseStateNone represents an empty LeaseStateType. + LeaseStateNone LeaseStateType = "" +) + +// PossibleLeaseStateTypeValues returns an array of possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return []LeaseStateType{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased, LeaseStateNone} +} + +// LeaseStatusType enumerates the values for lease status type. 
+type LeaseStatusType string + +const ( + // LeaseStatusLocked ... + LeaseStatusLocked LeaseStatusType = "locked" + // LeaseStatusNone represents an empty LeaseStatusType. + LeaseStatusNone LeaseStatusType = "" + // LeaseStatusUnlocked ... + LeaseStatusUnlocked LeaseStatusType = "unlocked" +) + +// PossibleLeaseStatusTypeValues returns an array of possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return []LeaseStatusType{LeaseStatusLocked, LeaseStatusNone, LeaseStatusUnlocked} +} + +// ListBlobsIncludeItemType enumerates the values for list blobs include item type. +type ListBlobsIncludeItemType string + +const ( + // ListBlobsIncludeItemCopy ... + ListBlobsIncludeItemCopy ListBlobsIncludeItemType = "copy" + // ListBlobsIncludeItemDeleted ... + ListBlobsIncludeItemDeleted ListBlobsIncludeItemType = "deleted" + // ListBlobsIncludeItemMetadata ... + ListBlobsIncludeItemMetadata ListBlobsIncludeItemType = "metadata" + // ListBlobsIncludeItemNone represents an empty ListBlobsIncludeItemType. + ListBlobsIncludeItemNone ListBlobsIncludeItemType = "" + // ListBlobsIncludeItemSnapshots ... + ListBlobsIncludeItemSnapshots ListBlobsIncludeItemType = "snapshots" + // ListBlobsIncludeItemUncommittedblobs ... + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItemType = "uncommittedblobs" +) + +// PossibleListBlobsIncludeItemTypeValues returns an array of possible values for the ListBlobsIncludeItemType const type. +func PossibleListBlobsIncludeItemTypeValues() []ListBlobsIncludeItemType { + return []ListBlobsIncludeItemType{ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemNone, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemUncommittedblobs} +} + +// ListContainersIncludeType enumerates the values for list containers include type. +type ListContainersIncludeType string + +const ( + // ListContainersIncludeMetadata ... + ListContainersIncludeMetadata ListContainersIncludeType = "metadata" + // ListContainersIncludeNone represents an empty ListContainersIncludeType. + ListContainersIncludeNone ListContainersIncludeType = "" +) + +// PossibleListContainersIncludeTypeValues returns an array of possible values for the ListContainersIncludeType const type. +func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return []ListContainersIncludeType{ListContainersIncludeMetadata, ListContainersIncludeNone} +} + +// PublicAccessType enumerates the values for public access type. +type PublicAccessType string + +const ( + // PublicAccessBlob ... + PublicAccessBlob PublicAccessType = "blob" + // PublicAccessContainer ... + PublicAccessContainer PublicAccessType = "container" + // PublicAccessNone represents an empty PublicAccessType. + PublicAccessNone PublicAccessType = "" +) + +// PossiblePublicAccessTypeValues returns an array of possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return []PublicAccessType{PublicAccessBlob, PublicAccessContainer, PublicAccessNone} +} + +// SequenceNumberActionType enumerates the values for sequence number action type. +type SequenceNumberActionType string + +const ( + // SequenceNumberActionIncrement ... + SequenceNumberActionIncrement SequenceNumberActionType = "increment" + // SequenceNumberActionMax ... + SequenceNumberActionMax SequenceNumberActionType = "max" + // SequenceNumberActionNone represents an empty SequenceNumberActionType. 
+ SequenceNumberActionNone SequenceNumberActionType = "" + // SequenceNumberActionUpdate ... + SequenceNumberActionUpdate SequenceNumberActionType = "update" +) + +// PossibleSequenceNumberActionTypeValues returns an array of possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return []SequenceNumberActionType{SequenceNumberActionIncrement, SequenceNumberActionMax, SequenceNumberActionNone, SequenceNumberActionUpdate} +} + +// StorageErrorCodeType enumerates the values for storage error code type. +type StorageErrorCodeType string + +const ( + // StorageErrorCodeAccountAlreadyExists ... + StorageErrorCodeAccountAlreadyExists StorageErrorCodeType = "AccountAlreadyExists" + // StorageErrorCodeAccountBeingCreated ... + StorageErrorCodeAccountBeingCreated StorageErrorCodeType = "AccountBeingCreated" + // StorageErrorCodeAccountIsDisabled ... + StorageErrorCodeAccountIsDisabled StorageErrorCodeType = "AccountIsDisabled" + // StorageErrorCodeAppendPositionConditionNotMet ... + StorageErrorCodeAppendPositionConditionNotMet StorageErrorCodeType = "AppendPositionConditionNotMet" + // StorageErrorCodeAuthenticationFailed ... + StorageErrorCodeAuthenticationFailed StorageErrorCodeType = "AuthenticationFailed" + // StorageErrorCodeBlobAlreadyExists ... + StorageErrorCodeBlobAlreadyExists StorageErrorCodeType = "BlobAlreadyExists" + // StorageErrorCodeBlobArchived ... + StorageErrorCodeBlobArchived StorageErrorCodeType = "BlobArchived" + // StorageErrorCodeBlobBeingRehydrated ... + StorageErrorCodeBlobBeingRehydrated StorageErrorCodeType = "BlobBeingRehydrated" + // StorageErrorCodeBlobNotArchived ... + StorageErrorCodeBlobNotArchived StorageErrorCodeType = "BlobNotArchived" + // StorageErrorCodeBlobNotFound ... + StorageErrorCodeBlobNotFound StorageErrorCodeType = "BlobNotFound" + // StorageErrorCodeBlobOverwritten ... + StorageErrorCodeBlobOverwritten StorageErrorCodeType = "BlobOverwritten" + // StorageErrorCodeBlobTierInadequateForContentLength ... + StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCodeType = "BlobTierInadequateForContentLength" + // StorageErrorCodeBlockCountExceedsLimit ... + StorageErrorCodeBlockCountExceedsLimit StorageErrorCodeType = "BlockCountExceedsLimit" + // StorageErrorCodeBlockListTooLong ... + StorageErrorCodeBlockListTooLong StorageErrorCodeType = "BlockListTooLong" + // StorageErrorCodeCannotChangeToLowerTier ... + StorageErrorCodeCannotChangeToLowerTier StorageErrorCodeType = "CannotChangeToLowerTier" + // StorageErrorCodeCannotVerifyCopySource ... + StorageErrorCodeCannotVerifyCopySource StorageErrorCodeType = "CannotVerifyCopySource" + // StorageErrorCodeConditionHeadersNotSupported ... + StorageErrorCodeConditionHeadersNotSupported StorageErrorCodeType = "ConditionHeadersNotSupported" + // StorageErrorCodeConditionNotMet ... + StorageErrorCodeConditionNotMet StorageErrorCodeType = "ConditionNotMet" + // StorageErrorCodeContainerAlreadyExists ... + StorageErrorCodeContainerAlreadyExists StorageErrorCodeType = "ContainerAlreadyExists" + // StorageErrorCodeContainerBeingDeleted ... + StorageErrorCodeContainerBeingDeleted StorageErrorCodeType = "ContainerBeingDeleted" + // StorageErrorCodeContainerDisabled ... + StorageErrorCodeContainerDisabled StorageErrorCodeType = "ContainerDisabled" + // StorageErrorCodeContainerNotFound ... + StorageErrorCodeContainerNotFound StorageErrorCodeType = "ContainerNotFound" + // StorageErrorCodeContentLengthLargerThanTierLimit ... 
+ StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCodeType = "ContentLengthLargerThanTierLimit" + // StorageErrorCodeCopyAcrossAccountsNotSupported ... + StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCodeType = "CopyAcrossAccountsNotSupported" + // StorageErrorCodeCopyIDMismatch ... + StorageErrorCodeCopyIDMismatch StorageErrorCodeType = "CopyIdMismatch" + // StorageErrorCodeEmptyMetadataKey ... + StorageErrorCodeEmptyMetadataKey StorageErrorCodeType = "EmptyMetadataKey" + // StorageErrorCodeFeatureVersionMismatch ... + StorageErrorCodeFeatureVersionMismatch StorageErrorCodeType = "FeatureVersionMismatch" + // StorageErrorCodeIncrementalCopyBlobMismatch ... + StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCodeType = "IncrementalCopyBlobMismatch" + // StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ... + StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + // StorageErrorCodeIncrementalCopySourceMustBeSnapshot ... + StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCodeType = "IncrementalCopySourceMustBeSnapshot" + // StorageErrorCodeInfiniteLeaseDurationRequired ... + StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCodeType = "InfiniteLeaseDurationRequired" + // StorageErrorCodeInsufficientAccountPermissions ... + StorageErrorCodeInsufficientAccountPermissions StorageErrorCodeType = "InsufficientAccountPermissions" + // StorageErrorCodeInternalError ... + StorageErrorCodeInternalError StorageErrorCodeType = "InternalError" + // StorageErrorCodeInvalidAuthenticationInfo ... + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCodeType = "InvalidAuthenticationInfo" + // StorageErrorCodeInvalidBlobOrBlock ... + StorageErrorCodeInvalidBlobOrBlock StorageErrorCodeType = "InvalidBlobOrBlock" + // StorageErrorCodeInvalidBlobTier ... + StorageErrorCodeInvalidBlobTier StorageErrorCodeType = "InvalidBlobTier" + // StorageErrorCodeInvalidBlobType ... + StorageErrorCodeInvalidBlobType StorageErrorCodeType = "InvalidBlobType" + // StorageErrorCodeInvalidBlockID ... + StorageErrorCodeInvalidBlockID StorageErrorCodeType = "InvalidBlockId" + // StorageErrorCodeInvalidBlockList ... + StorageErrorCodeInvalidBlockList StorageErrorCodeType = "InvalidBlockList" + // StorageErrorCodeInvalidHeaderValue ... + StorageErrorCodeInvalidHeaderValue StorageErrorCodeType = "InvalidHeaderValue" + // StorageErrorCodeInvalidHTTPVerb ... + StorageErrorCodeInvalidHTTPVerb StorageErrorCodeType = "InvalidHttpVerb" + // StorageErrorCodeInvalidInput ... + StorageErrorCodeInvalidInput StorageErrorCodeType = "InvalidInput" + // StorageErrorCodeInvalidMd5 ... + StorageErrorCodeInvalidMd5 StorageErrorCodeType = "InvalidMd5" + // StorageErrorCodeInvalidMetadata ... + StorageErrorCodeInvalidMetadata StorageErrorCodeType = "InvalidMetadata" + // StorageErrorCodeInvalidOperation ... + StorageErrorCodeInvalidOperation StorageErrorCodeType = "InvalidOperation" + // StorageErrorCodeInvalidPageRange ... + StorageErrorCodeInvalidPageRange StorageErrorCodeType = "InvalidPageRange" + // StorageErrorCodeInvalidQueryParameterValue ... + StorageErrorCodeInvalidQueryParameterValue StorageErrorCodeType = "InvalidQueryParameterValue" + // StorageErrorCodeInvalidRange ... + StorageErrorCodeInvalidRange StorageErrorCodeType = "InvalidRange" + // StorageErrorCodeInvalidResourceName ... 
+ StorageErrorCodeInvalidResourceName StorageErrorCodeType = "InvalidResourceName" + // StorageErrorCodeInvalidSourceBlobType ... + StorageErrorCodeInvalidSourceBlobType StorageErrorCodeType = "InvalidSourceBlobType" + // StorageErrorCodeInvalidSourceBlobURL ... + StorageErrorCodeInvalidSourceBlobURL StorageErrorCodeType = "InvalidSourceBlobUrl" + // StorageErrorCodeInvalidURI ... + StorageErrorCodeInvalidURI StorageErrorCodeType = "InvalidUri" + // StorageErrorCodeInvalidVersionForPageBlobOperation ... + StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCodeType = "InvalidVersionForPageBlobOperation" + // StorageErrorCodeInvalidXMLDocument ... + StorageErrorCodeInvalidXMLDocument StorageErrorCodeType = "InvalidXmlDocument" + // StorageErrorCodeInvalidXMLNodeValue ... + StorageErrorCodeInvalidXMLNodeValue StorageErrorCodeType = "InvalidXmlNodeValue" + // StorageErrorCodeLeaseAlreadyBroken ... + StorageErrorCodeLeaseAlreadyBroken StorageErrorCodeType = "LeaseAlreadyBroken" + // StorageErrorCodeLeaseAlreadyPresent ... + StorageErrorCodeLeaseAlreadyPresent StorageErrorCodeType = "LeaseAlreadyPresent" + // StorageErrorCodeLeaseIDMismatchWithBlobOperation ... + StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCodeType = "LeaseIdMismatchWithBlobOperation" + // StorageErrorCodeLeaseIDMismatchWithContainerOperation ... + StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCodeType = "LeaseIdMismatchWithContainerOperation" + // StorageErrorCodeLeaseIDMismatchWithLeaseOperation ... + StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCodeType = "LeaseIdMismatchWithLeaseOperation" + // StorageErrorCodeLeaseIDMissing ... + StorageErrorCodeLeaseIDMissing StorageErrorCodeType = "LeaseIdMissing" + // StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired ... + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCodeType = "LeaseIsBreakingAndCannotBeAcquired" + // StorageErrorCodeLeaseIsBreakingAndCannotBeChanged ... + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCodeType = "LeaseIsBreakingAndCannotBeChanged" + // StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed ... + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCodeType = "LeaseIsBrokenAndCannotBeRenewed" + // StorageErrorCodeLeaseLost ... + StorageErrorCodeLeaseLost StorageErrorCodeType = "LeaseLost" + // StorageErrorCodeLeaseNotPresentWithBlobOperation ... + StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCodeType = "LeaseNotPresentWithBlobOperation" + // StorageErrorCodeLeaseNotPresentWithContainerOperation ... + StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCodeType = "LeaseNotPresentWithContainerOperation" + // StorageErrorCodeLeaseNotPresentWithLeaseOperation ... + StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCodeType = "LeaseNotPresentWithLeaseOperation" + // StorageErrorCodeMaxBlobSizeConditionNotMet ... + StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCodeType = "MaxBlobSizeConditionNotMet" + // StorageErrorCodeMd5Mismatch ... + StorageErrorCodeMd5Mismatch StorageErrorCodeType = "Md5Mismatch" + // StorageErrorCodeMetadataTooLarge ... + StorageErrorCodeMetadataTooLarge StorageErrorCodeType = "MetadataTooLarge" + // StorageErrorCodeMissingContentLengthHeader ... + StorageErrorCodeMissingContentLengthHeader StorageErrorCodeType = "MissingContentLengthHeader" + // StorageErrorCodeMissingRequiredHeader ... 
+ StorageErrorCodeMissingRequiredHeader StorageErrorCodeType = "MissingRequiredHeader" + // StorageErrorCodeMissingRequiredQueryParameter ... + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCodeType = "MissingRequiredQueryParameter" + // StorageErrorCodeMissingRequiredXMLNode ... + StorageErrorCodeMissingRequiredXMLNode StorageErrorCodeType = "MissingRequiredXmlNode" + // StorageErrorCodeMultipleConditionHeadersNotSupported ... + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCodeType = "MultipleConditionHeadersNotSupported" + // StorageErrorCodeNone represents an empty StorageErrorCodeType. + StorageErrorCodeNone StorageErrorCodeType = "" + // StorageErrorCodeNoPendingCopyOperation ... + StorageErrorCodeNoPendingCopyOperation StorageErrorCodeType = "NoPendingCopyOperation" + // StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob ... + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCodeType = "OperationNotAllowedOnIncrementalCopyBlob" + // StorageErrorCodeOperationTimedOut ... + StorageErrorCodeOperationTimedOut StorageErrorCodeType = "OperationTimedOut" + // StorageErrorCodeOutOfRangeInput ... + StorageErrorCodeOutOfRangeInput StorageErrorCodeType = "OutOfRangeInput" + // StorageErrorCodeOutOfRangeQueryParameterValue ... + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCodeType = "OutOfRangeQueryParameterValue" + // StorageErrorCodePendingCopyOperation ... + StorageErrorCodePendingCopyOperation StorageErrorCodeType = "PendingCopyOperation" + // StorageErrorCodePreviousSnapshotCannotBeNewer ... + StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCodeType = "PreviousSnapshotCannotBeNewer" + // StorageErrorCodePreviousSnapshotNotFound ... + StorageErrorCodePreviousSnapshotNotFound StorageErrorCodeType = "PreviousSnapshotNotFound" + // StorageErrorCodePreviousSnapshotOperationNotSupported ... + StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCodeType = "PreviousSnapshotOperationNotSupported" + // StorageErrorCodeRequestBodyTooLarge ... + StorageErrorCodeRequestBodyTooLarge StorageErrorCodeType = "RequestBodyTooLarge" + // StorageErrorCodeRequestURLFailedToParse ... + StorageErrorCodeRequestURLFailedToParse StorageErrorCodeType = "RequestUrlFailedToParse" + // StorageErrorCodeResourceAlreadyExists ... + StorageErrorCodeResourceAlreadyExists StorageErrorCodeType = "ResourceAlreadyExists" + // StorageErrorCodeResourceNotFound ... + StorageErrorCodeResourceNotFound StorageErrorCodeType = "ResourceNotFound" + // StorageErrorCodeResourceTypeMismatch ... + StorageErrorCodeResourceTypeMismatch StorageErrorCodeType = "ResourceTypeMismatch" + // StorageErrorCodeSequenceNumberConditionNotMet ... + StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCodeType = "SequenceNumberConditionNotMet" + // StorageErrorCodeSequenceNumberIncrementTooLarge ... + StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCodeType = "SequenceNumberIncrementTooLarge" + // StorageErrorCodeServerBusy ... + StorageErrorCodeServerBusy StorageErrorCodeType = "ServerBusy" + // StorageErrorCodeSnaphotOperationRateExceeded ... + StorageErrorCodeSnaphotOperationRateExceeded StorageErrorCodeType = "SnaphotOperationRateExceeded" + // StorageErrorCodeSnapshotCountExceeded ... + StorageErrorCodeSnapshotCountExceeded StorageErrorCodeType = "SnapshotCountExceeded" + // StorageErrorCodeSnapshotsPresent ... + StorageErrorCodeSnapshotsPresent StorageErrorCodeType = "SnapshotsPresent" + // StorageErrorCodeSourceConditionNotMet ... 
+ StorageErrorCodeSourceConditionNotMet StorageErrorCodeType = "SourceConditionNotMet" + // StorageErrorCodeSystemInUse ... + StorageErrorCodeSystemInUse StorageErrorCodeType = "SystemInUse" + // StorageErrorCodeTargetConditionNotMet ... + StorageErrorCodeTargetConditionNotMet StorageErrorCodeType = "TargetConditionNotMet" + // StorageErrorCodeUnauthorizedBlobOverwrite ... + StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCodeType = "UnauthorizedBlobOverwrite" + // StorageErrorCodeUnsupportedHeader ... + StorageErrorCodeUnsupportedHeader StorageErrorCodeType = "UnsupportedHeader" + // StorageErrorCodeUnsupportedHTTPVerb ... + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCodeType = "UnsupportedHttpVerb" + // StorageErrorCodeUnsupportedQueryParameter ... + StorageErrorCodeUnsupportedQueryParameter StorageErrorCodeType = "UnsupportedQueryParameter" + // StorageErrorCodeUnsupportedXMLNode ... + StorageErrorCodeUnsupportedXMLNode StorageErrorCodeType = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeTypeValues returns an array of possible values for the StorageErrorCodeType const type. +func PossibleStorageErrorCodeTypeValues() []StorageErrorCodeType { + return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAppendPositionConditionNotMet, StorageErrorCodeAuthenticationFailed, StorageErrorCodeBlobAlreadyExists, StorageErrorCodeBlobArchived, StorageErrorCodeBlobBeingRehydrated, StorageErrorCodeBlobNotArchived, StorageErrorCodeBlobNotFound, StorageErrorCodeBlobOverwritten, StorageErrorCodeBlobTierInadequateForContentLength, StorageErrorCodeBlockCountExceedsLimit, StorageErrorCodeBlockListTooLong, StorageErrorCodeCannotChangeToLowerTier, StorageErrorCodeCannotVerifyCopySource, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerAlreadyExists, StorageErrorCodeContainerBeingDeleted, StorageErrorCodeContainerDisabled, StorageErrorCodeContainerNotFound, StorageErrorCodeContentLengthLargerThanTierLimit, StorageErrorCodeCopyAcrossAccountsNotSupported, StorageErrorCodeCopyIDMismatch, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidBlobOrBlock, StorageErrorCodeInvalidBlobTier, StorageErrorCodeInvalidBlobType, StorageErrorCodeInvalidBlockID, StorageErrorCodeInvalidBlockList, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidOperation, StorageErrorCodeInvalidPageRange, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidSourceBlobType, StorageErrorCodeInvalidSourceBlobURL, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidVersionForPageBlobOperation, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeLeaseAlreadyBroken, StorageErrorCodeLeaseAlreadyPresent, StorageErrorCodeLeaseIDMismatchWithBlobOperation, StorageErrorCodeLeaseIDMismatchWithContainerOperation, StorageErrorCodeLeaseIDMismatchWithLeaseOperation, 
StorageErrorCodeLeaseIDMissing, StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, StorageErrorCodeLeaseLost, StorageErrorCodeLeaseNotPresentWithBlobOperation, StorageErrorCodeLeaseNotPresentWithContainerOperation, StorageErrorCodeLeaseNotPresentWithLeaseOperation, StorageErrorCodeMaxBlobSizeConditionNotMet, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNone, StorageErrorCodeNoPendingCopyOperation, StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePendingCopyOperation, StorageErrorCodePreviousSnapshotCannotBeNewer, StorageErrorCodePreviousSnapshotNotFound, StorageErrorCodePreviousSnapshotOperationNotSupported, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, StorageErrorCodeResourceTypeMismatch, StorageErrorCodeSequenceNumberConditionNotMet, StorageErrorCodeSequenceNumberIncrementTooLarge, StorageErrorCodeServerBusy, StorageErrorCodeSnaphotOperationRateExceeded, StorageErrorCodeSnapshotCountExceeded, StorageErrorCodeSnapshotsPresent, StorageErrorCodeSourceConditionNotMet, StorageErrorCodeSystemInUse, StorageErrorCodeTargetConditionNotMet, StorageErrorCodeUnauthorizedBlobOverwrite, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode} +} + +// AccessPolicy - An Access policy +type AccessPolicy struct { + // Start - the date-time the policy is active + Start time.Time `xml:"Start"` + // Expiry - the date-time the policy expires + Expiry time.Time `xml:"Expiry"` + // Permission - the permissions for the acl policy + Permission string `xml:"Permission"` +} + +// MarshalXML implements the xml.Marshaler interface for AccessPolicy. +func (ap AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() { + panic("size mismatch between AccessPolicy and accessPolicy") + } + ap2 := (*accessPolicy)(unsafe.Pointer(&ap)) + return e.EncodeElement(*ap2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for AccessPolicy. +func (ap *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() { + panic("size mismatch between AccessPolicy and accessPolicy") + } + ap2 := (*accessPolicy)(unsafe.Pointer(ap)) + return d.DecodeElement(ap2, &start) +} + +// AppendBlobAppendBlockResponse ... +type AppendBlobAppendBlockResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ababr AppendBlobAppendBlockResponse) Response() *http.Response { + return ababr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ababr AppendBlobAppendBlockResponse) StatusCode() int { + return ababr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". 
+func (ababr AppendBlobAppendBlockResponse) Status() string { + return ababr.rawResponse.Status +} + +// BlobAppendOffset returns the value for header x-ms-blob-append-offset. +func (ababr AppendBlobAppendBlockResponse) BlobAppendOffset() string { + return ababr.rawResponse.Header.Get("x-ms-blob-append-offset") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (ababr AppendBlobAppendBlockResponse) BlobCommittedBlockCount() int32 { + s := ababr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + panic(err) + } + return int32(i) +} + +// ContentMD5 returns the value for header Content-MD5. +func (ababr AppendBlobAppendBlockResponse) ContentMD5() []byte { + s := ababr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// Date returns the value for header Date. +func (ababr AppendBlobAppendBlockResponse) Date() time.Time { + s := ababr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ababr AppendBlobAppendBlockResponse) ErrorCode() string { + return ababr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (ababr AppendBlobAppendBlockResponse) ETag() ETag { + return ETag(ababr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (ababr AppendBlobAppendBlockResponse) LastModified() time.Time { + s := ababr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (ababr AppendBlobAppendBlockResponse) RequestID() string { + return ababr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ababr AppendBlobAppendBlockResponse) Version() string { + return ababr.rawResponse.Header.Get("x-ms-version") +} + +// AppendBlobCreateResponse ... +type AppendBlobCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (abcr AppendBlobCreateResponse) Response() *http.Response { + return abcr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (abcr AppendBlobCreateResponse) StatusCode() int { + return abcr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (abcr AppendBlobCreateResponse) Status() string { + return abcr.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (abcr AppendBlobCreateResponse) ContentMD5() []byte { + s := abcr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// Date returns the value for header Date. +func (abcr AppendBlobCreateResponse) Date() time.Time { + s := abcr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
+func (abcr AppendBlobCreateResponse) ErrorCode() string { + return abcr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (abcr AppendBlobCreateResponse) ETag() ETag { + return ETag(abcr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (abcr AppendBlobCreateResponse) IsServerEncrypted() string { + return abcr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (abcr AppendBlobCreateResponse) LastModified() time.Time { + s := abcr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (abcr AppendBlobCreateResponse) RequestID() string { + return abcr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (abcr AppendBlobCreateResponse) Version() string { + return abcr.rawResponse.Header.Get("x-ms-version") +} + +// BlobAbortCopyFromURLResponse ... +type BlobAbortCopyFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bacfur BlobAbortCopyFromURLResponse) Response() *http.Response { + return bacfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bacfur BlobAbortCopyFromURLResponse) StatusCode() int { + return bacfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bacfur BlobAbortCopyFromURLResponse) Status() string { + return bacfur.rawResponse.Status +} + +// Date returns the value for header Date. +func (bacfur BlobAbortCopyFromURLResponse) Date() time.Time { + s := bacfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bacfur BlobAbortCopyFromURLResponse) ErrorCode() string { + return bacfur.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bacfur BlobAbortCopyFromURLResponse) RequestID() string { + return bacfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bacfur BlobAbortCopyFromURLResponse) Version() string { + return bacfur.rawResponse.Header.Get("x-ms-version") +} + +// BlobAcquireLeaseResponse ... +type BlobAcquireLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (balr BlobAcquireLeaseResponse) Response() *http.Response { + return balr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (balr BlobAcquireLeaseResponse) StatusCode() int { + return balr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (balr BlobAcquireLeaseResponse) Status() string { + return balr.rawResponse.Status +} + +// Date returns the value for header Date. +func (balr BlobAcquireLeaseResponse) Date() time.Time { + s := balr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
+func (balr BlobAcquireLeaseResponse) ErrorCode() string { + return balr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (balr BlobAcquireLeaseResponse) ETag() ETag { + return ETag(balr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (balr BlobAcquireLeaseResponse) LastModified() time.Time { + s := balr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (balr BlobAcquireLeaseResponse) LeaseID() string { + return balr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (balr BlobAcquireLeaseResponse) RequestID() string { + return balr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (balr BlobAcquireLeaseResponse) Version() string { + return balr.rawResponse.Header.Get("x-ms-version") +} + +// BlobBreakLeaseResponse ... +type BlobBreakLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bblr BlobBreakLeaseResponse) Response() *http.Response { + return bblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bblr BlobBreakLeaseResponse) StatusCode() int { + return bblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bblr BlobBreakLeaseResponse) Status() string { + return bblr.rawResponse.Status +} + +// Date returns the value for header Date. +func (bblr BlobBreakLeaseResponse) Date() time.Time { + s := bblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bblr BlobBreakLeaseResponse) ErrorCode() string { + return bblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bblr BlobBreakLeaseResponse) ETag() ETag { + return ETag(bblr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bblr BlobBreakLeaseResponse) LastModified() time.Time { + s := bblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseTime returns the value for header x-ms-lease-time. +func (bblr BlobBreakLeaseResponse) LeaseTime() int32 { + s := bblr.rawResponse.Header.Get("x-ms-lease-time") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + panic(err) + } + return int32(i) +} + +// RequestID returns the value for header x-ms-request-id. +func (bblr BlobBreakLeaseResponse) RequestID() string { + return bblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bblr BlobBreakLeaseResponse) Version() string { + return bblr.rawResponse.Header.Get("x-ms-version") +} + +// BlobChangeLeaseResponse ... +type BlobChangeLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. 
+func (bclr BlobChangeLeaseResponse) Response() *http.Response { + return bclr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bclr BlobChangeLeaseResponse) StatusCode() int { + return bclr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bclr BlobChangeLeaseResponse) Status() string { + return bclr.rawResponse.Status +} + +// Date returns the value for header Date. +func (bclr BlobChangeLeaseResponse) Date() time.Time { + s := bclr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bclr BlobChangeLeaseResponse) ErrorCode() string { + return bclr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bclr BlobChangeLeaseResponse) ETag() ETag { + return ETag(bclr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bclr BlobChangeLeaseResponse) LastModified() time.Time { + s := bclr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (bclr BlobChangeLeaseResponse) LeaseID() string { + return bclr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (bclr BlobChangeLeaseResponse) RequestID() string { + return bclr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bclr BlobChangeLeaseResponse) Version() string { + return bclr.rawResponse.Header.Get("x-ms-version") +} + +// BlobCreateSnapshotResponse ... +type BlobCreateSnapshotResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bcsr BlobCreateSnapshotResponse) Response() *http.Response { + return bcsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bcsr BlobCreateSnapshotResponse) StatusCode() int { + return bcsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bcsr BlobCreateSnapshotResponse) Status() string { + return bcsr.rawResponse.Status +} + +// Date returns the value for header Date. +func (bcsr BlobCreateSnapshotResponse) Date() time.Time { + s := bcsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bcsr BlobCreateSnapshotResponse) ErrorCode() string { + return bcsr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bcsr BlobCreateSnapshotResponse) ETag() ETag { + return ETag(bcsr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bcsr BlobCreateSnapshotResponse) LastModified() time.Time { + s := bcsr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. 
+func (bcsr BlobCreateSnapshotResponse) RequestID() string { + return bcsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Snapshot returns the value for header x-ms-snapshot. +func (bcsr BlobCreateSnapshotResponse) Snapshot() string { + return bcsr.rawResponse.Header.Get("x-ms-snapshot") +} + +// Version returns the value for header x-ms-version. +func (bcsr BlobCreateSnapshotResponse) Version() string { + return bcsr.rawResponse.Header.Get("x-ms-version") +} + +// BlobDeleteResponse ... +type BlobDeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bdr BlobDeleteResponse) Response() *http.Response { + return bdr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bdr BlobDeleteResponse) StatusCode() int { + return bdr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bdr BlobDeleteResponse) Status() string { + return bdr.rawResponse.Status +} + +// Date returns the value for header Date. +func (bdr BlobDeleteResponse) Date() time.Time { + s := bdr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bdr BlobDeleteResponse) ErrorCode() string { + return bdr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bdr BlobDeleteResponse) RequestID() string { + return bdr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bdr BlobDeleteResponse) Version() string { + return bdr.rawResponse.Header.Get("x-ms-version") +} + +// BlobFlatList ... +type BlobFlatList struct { + BlobItems []BlobItem `xml:"Blob"` +} + +// BlobGetPropertiesResponse ... +type BlobGetPropertiesResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (bgpr BlobGetPropertiesResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range bgpr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (bgpr BlobGetPropertiesResponse) Response() *http.Response { + return bgpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bgpr BlobGetPropertiesResponse) StatusCode() int { + return bgpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bgpr BlobGetPropertiesResponse) Status() string { + return bgpr.rawResponse.Status +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (bgpr BlobGetPropertiesResponse) AcceptRanges() string { + return bgpr.rawResponse.Header.Get("Accept-Ranges") +} + +// AccessTier returns the value for header x-ms-access-tier. +func (bgpr BlobGetPropertiesResponse) AccessTier() string { + return bgpr.rawResponse.Header.Get("x-ms-access-tier") +} + +// AccessTierInferred returns the value for header x-ms-access-tier-inferred. +func (bgpr BlobGetPropertiesResponse) AccessTierInferred() string { + return bgpr.rawResponse.Header.Get("x-ms-access-tier-inferred") +} + +// ArchiveStatus returns the value for header x-ms-archive-status. 
+func (bgpr BlobGetPropertiesResponse) ArchiveStatus() string { + return bgpr.rawResponse.Header.Get("x-ms-archive-status") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (bgpr BlobGetPropertiesResponse) BlobCommittedBlockCount() int32 { + s := bgpr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + panic(err) + } + return int32(i) +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (bgpr BlobGetPropertiesResponse) BlobSequenceNumber() int64 { + s := bgpr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (bgpr BlobGetPropertiesResponse) BlobType() BlobType { + return BlobType(bgpr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. +func (bgpr BlobGetPropertiesResponse) CacheControl() string { + return bgpr.rawResponse.Header.Get("Cache-Control") +} + +// ContentDisposition returns the value for header Content-Disposition. +func (bgpr BlobGetPropertiesResponse) ContentDisposition() string { + return bgpr.rawResponse.Header.Get("Content-Disposition") +} + +// ContentEncoding returns the value for header Content-Encoding. +func (bgpr BlobGetPropertiesResponse) ContentEncoding() string { + return bgpr.rawResponse.Header.Get("Content-Encoding") +} + +// ContentLanguage returns the value for header Content-Language. +func (bgpr BlobGetPropertiesResponse) ContentLanguage() string { + return bgpr.rawResponse.Header.Get("Content-Language") +} + +// ContentLength returns the value for header Content-Length. +func (bgpr BlobGetPropertiesResponse) ContentLength() int64 { + s := bgpr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (bgpr BlobGetPropertiesResponse) ContentMD5() []byte { + s := bgpr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// ContentType returns the value for header Content-Type. +func (bgpr BlobGetPropertiesResponse) ContentType() string { + return bgpr.rawResponse.Header.Get("Content-Type") +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (bgpr BlobGetPropertiesResponse) CopyCompletionTime() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-copy-completion-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// CopyID returns the value for header x-ms-copy-id. +func (bgpr BlobGetPropertiesResponse) CopyID() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (bgpr BlobGetPropertiesResponse) CopyProgress() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-progress") +} + +// CopySource returns the value for header x-ms-copy-source. +func (bgpr BlobGetPropertiesResponse) CopySource() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-source") +} + +// CopyStatus returns the value for header x-ms-copy-status. 
+func (bgpr BlobGetPropertiesResponse) CopyStatus() CopyStatusType { + return CopyStatusType(bgpr.rawResponse.Header.Get("x-ms-copy-status")) +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (bgpr BlobGetPropertiesResponse) CopyStatusDescription() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-status-description") +} + +// Date returns the value for header Date. +func (bgpr BlobGetPropertiesResponse) Date() time.Time { + s := bgpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// DestinationSnapshot returns the value for header x-ms-copy-destination-snapshot. +func (bgpr BlobGetPropertiesResponse) DestinationSnapshot() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-destination-snapshot") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bgpr BlobGetPropertiesResponse) ErrorCode() string { + return bgpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bgpr BlobGetPropertiesResponse) ETag() ETag { + return ETag(bgpr.rawResponse.Header.Get("ETag")) +} + +// IsIncrementalCopy returns the value for header x-ms-incremental-copy. +func (bgpr BlobGetPropertiesResponse) IsIncrementalCopy() string { + return bgpr.rawResponse.Header.Get("x-ms-incremental-copy") +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (bgpr BlobGetPropertiesResponse) IsServerEncrypted() string { + return bgpr.rawResponse.Header.Get("x-ms-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bgpr BlobGetPropertiesResponse) LastModified() time.Time { + s := bgpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (bgpr BlobGetPropertiesResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(bgpr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (bgpr BlobGetPropertiesResponse) LeaseState() LeaseStateType { + return LeaseStateType(bgpr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (bgpr BlobGetPropertiesResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(bgpr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// RequestID returns the value for header x-ms-request-id. +func (bgpr BlobGetPropertiesResponse) RequestID() string { + return bgpr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bgpr BlobGetPropertiesResponse) Version() string { + return bgpr.rawResponse.Header.Get("x-ms-version") +} + +// BlobHierarchyList ... +type BlobHierarchyList struct { + BlobPrefixes []BlobPrefix `xml:"BlobPrefix"` + BlobItems []BlobItem `xml:"Blob"` +} + +// BlobItem - An Azure Storage blob +type BlobItem struct { + Name string `xml:"Name"` + Deleted bool `xml:"Deleted"` + Snapshot string `xml:"Snapshot"` + Properties BlobProperties `xml:"Properties"` + Metadata Metadata `xml:"Metadata"` +} + +// BlobPrefix ... 
+type BlobPrefix struct { + Name string `xml:"Name"` +} + +// BlobProperties - Properties of a blob +type BlobProperties struct { + LastModified time.Time `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + // ContentLength - Size in bytes + ContentLength *int64 `xml:"Content-Length"` + ContentType *string `xml:"Content-Type"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + ContentMD5 []byte `xml:"Content-MD5"` + ContentDisposition *string `xml:"Content-Disposition"` + CacheControl *string `xml:"Cache-Control"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + // BlobType - Possible values include: 'BlobBlockBlob', 'BlobPageBlob', 'BlobAppendBlob', 'BlobNone' + BlobType BlobType `xml:"BlobType"` + // LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone' + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + // LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone' + LeaseState LeaseStateType `xml:"LeaseState"` + // LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone' + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + CopyID *string `xml:"CopyId"` + // CopyStatus - Possible values include: 'CopyStatusPending', 'CopyStatusSuccess', 'CopyStatusAborted', 'CopyStatusFailed', 'CopyStatusNone' + CopyStatus CopyStatusType `xml:"CopyStatus"` + CopySource *string `xml:"CopySource"` + CopyProgress *string `xml:"CopyProgress"` + CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + DeletedTime *time.Time `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + // AccessTier - Possible values include: 'AccessTierP4', 'AccessTierP6', 'AccessTierP10', 'AccessTierP20', 'AccessTierP30', 'AccessTierP40', 'AccessTierP50', 'AccessTierHot', 'AccessTierCool', 'AccessTierArchive', 'AccessTierNone' + AccessTier AccessTierType `xml:"AccessTier"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + // ArchiveStatus - Possible values include: 'ArchiveStatusRehydratePendingToHot', 'ArchiveStatusRehydratePendingToCool', 'ArchiveStatusNone' + ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` +} + +// MarshalXML implements the xml.Marshaler interface for BlobProperties. +func (bp BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if reflect.TypeOf((*BlobProperties)(nil)).Elem().Size() != reflect.TypeOf((*blobProperties)(nil)).Elem().Size() { + panic("size mismatch between BlobProperties and blobProperties") + } + bp2 := (*blobProperties)(unsafe.Pointer(&bp)) + return e.EncodeElement(*bp2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for BlobProperties. +func (bp *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + if reflect.TypeOf((*BlobProperties)(nil)).Elem().Size() != reflect.TypeOf((*blobProperties)(nil)).Elem().Size() { + panic("size mismatch between BlobProperties and blobProperties") + } + bp2 := (*blobProperties)(unsafe.Pointer(bp)) + return d.DecodeElement(bp2, &start) +} + +// BlobReleaseLeaseResponse ... 
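BlobProperties.MarshalXML and UnmarshalXML above route through an unexported blobProperties shadow type via an unsafe.Pointer cast, guarded by a size check, so that fields such as timestamps can use wire-format-specific XML marshalling without exposing the wrapper types. The stand-alone miniature below illustrates the pattern under stated assumptions: Props/props and timeRFC1123 are hypothetical stand-ins, since blobProperties itself is not shown in this hunk.

package main

import (
	"encoding/xml"
	"fmt"
	"reflect"
	"time"
	"unsafe"
)

// Props is the exported type; props is a layout-identical shadow whose time
// field carries the wire format. In the vendored package the analogous pair
// is BlobProperties / blobProperties.
type Props struct {
	LastModified time.Time `xml:"Last-Modified"`
	Name         string    `xml:"Name"`
}

// timeRFC1123 wraps time.Time so it marshals in the RFC 1123 wire format.
type timeRFC1123 struct{ time.Time }

func (t timeRFC1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(t.Format(time.RFC1123), start)
}

type props struct {
	LastModified timeRFC1123 `xml:"Last-Modified"`
	Name         string      `xml:"Name"`
}

// MarshalXML mirrors the generated pattern: verify the two layouts have the
// same size, then reinterpret the exported value as the shadow type.
func (p Props) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if reflect.TypeOf((*Props)(nil)).Elem().Size() != reflect.TypeOf((*props)(nil)).Elem().Size() {
		panic("size mismatch between Props and props")
	}
	p2 := (*props)(unsafe.Pointer(&p))
	return e.EncodeElement(*p2, start)
}

func main() {
	b, _ := xml.Marshal(Props{LastModified: time.Unix(0, 0).UTC(), Name: "blob1"})
	fmt.Println(string(b)) // Last-Modified is emitted in RFC 1123 form
}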
+type BlobReleaseLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (brlr BlobReleaseLeaseResponse) Response() *http.Response { + return brlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (brlr BlobReleaseLeaseResponse) StatusCode() int { + return brlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (brlr BlobReleaseLeaseResponse) Status() string { + return brlr.rawResponse.Status +} + +// Date returns the value for header Date. +func (brlr BlobReleaseLeaseResponse) Date() time.Time { + s := brlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (brlr BlobReleaseLeaseResponse) ErrorCode() string { + return brlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (brlr BlobReleaseLeaseResponse) ETag() ETag { + return ETag(brlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (brlr BlobReleaseLeaseResponse) LastModified() time.Time { + s := brlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (brlr BlobReleaseLeaseResponse) RequestID() string { + return brlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (brlr BlobReleaseLeaseResponse) Version() string { + return brlr.rawResponse.Header.Get("x-ms-version") +} + +// BlobRenewLeaseResponse ... +type BlobRenewLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (brlr BlobRenewLeaseResponse) Response() *http.Response { + return brlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (brlr BlobRenewLeaseResponse) StatusCode() int { + return brlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (brlr BlobRenewLeaseResponse) Status() string { + return brlr.rawResponse.Status +} + +// Date returns the value for header Date. +func (brlr BlobRenewLeaseResponse) Date() time.Time { + s := brlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (brlr BlobRenewLeaseResponse) ErrorCode() string { + return brlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (brlr BlobRenewLeaseResponse) ETag() ETag { + return ETag(brlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (brlr BlobRenewLeaseResponse) LastModified() time.Time { + s := brlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (brlr BlobRenewLeaseResponse) LeaseID() string { + return brlr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. 
+func (brlr BlobRenewLeaseResponse) RequestID() string { + return brlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (brlr BlobRenewLeaseResponse) Version() string { + return brlr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetHTTPHeadersResponse ... +type BlobSetHTTPHeadersResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bshhr BlobSetHTTPHeadersResponse) Response() *http.Response { + return bshhr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bshhr BlobSetHTTPHeadersResponse) StatusCode() int { + return bshhr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bshhr BlobSetHTTPHeadersResponse) Status() string { + return bshhr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (bshhr BlobSetHTTPHeadersResponse) BlobSequenceNumber() int64 { + s := bshhr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// Date returns the value for header Date. +func (bshhr BlobSetHTTPHeadersResponse) Date() time.Time { + s := bshhr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bshhr BlobSetHTTPHeadersResponse) ErrorCode() string { + return bshhr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bshhr BlobSetHTTPHeadersResponse) ETag() ETag { + return ETag(bshhr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bshhr BlobSetHTTPHeadersResponse) LastModified() time.Time { + s := bshhr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bshhr BlobSetHTTPHeadersResponse) RequestID() string { + return bshhr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bshhr BlobSetHTTPHeadersResponse) Version() string { + return bshhr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetMetadataResponse ... +type BlobSetMetadataResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bsmr BlobSetMetadataResponse) Response() *http.Response { + return bsmr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bsmr BlobSetMetadataResponse) StatusCode() int { + return bsmr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bsmr BlobSetMetadataResponse) Status() string { + return bsmr.rawResponse.Status +} + +// Date returns the value for header Date. +func (bsmr BlobSetMetadataResponse) Date() time.Time { + s := bsmr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
+func (bsmr BlobSetMetadataResponse) ErrorCode() string { + return bsmr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bsmr BlobSetMetadataResponse) ETag() ETag { + return ETag(bsmr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bsmr BlobSetMetadataResponse) IsServerEncrypted() string { + return bsmr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bsmr BlobSetMetadataResponse) LastModified() time.Time { + s := bsmr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bsmr BlobSetMetadataResponse) RequestID() string { + return bsmr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bsmr BlobSetMetadataResponse) Version() string { + return bsmr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetTierResponse ... +type BlobSetTierResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bstr BlobSetTierResponse) Response() *http.Response { + return bstr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bstr BlobSetTierResponse) StatusCode() int { + return bstr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bstr BlobSetTierResponse) Status() string { + return bstr.rawResponse.Status +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bstr BlobSetTierResponse) ErrorCode() string { + return bstr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bstr BlobSetTierResponse) RequestID() string { + return bstr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bstr BlobSetTierResponse) Version() string { + return bstr.rawResponse.Header.Get("x-ms-version") +} + +// BlobStartCopyFromURLResponse ... +type BlobStartCopyFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bscfur BlobStartCopyFromURLResponse) Response() *http.Response { + return bscfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bscfur BlobStartCopyFromURLResponse) StatusCode() int { + return bscfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bscfur BlobStartCopyFromURLResponse) Status() string { + return bscfur.rawResponse.Status +} + +// CopyID returns the value for header x-ms-copy-id. +func (bscfur BlobStartCopyFromURLResponse) CopyID() string { + return bscfur.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (bscfur BlobStartCopyFromURLResponse) CopyStatus() CopyStatusType { + return CopyStatusType(bscfur.rawResponse.Header.Get("x-ms-copy-status")) +} + +// Date returns the value for header Date. 
+func (bscfur BlobStartCopyFromURLResponse) Date() time.Time { + s := bscfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bscfur BlobStartCopyFromURLResponse) ErrorCode() string { + return bscfur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bscfur BlobStartCopyFromURLResponse) ETag() ETag { + return ETag(bscfur.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bscfur BlobStartCopyFromURLResponse) LastModified() time.Time { + s := bscfur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bscfur BlobStartCopyFromURLResponse) RequestID() string { + return bscfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bscfur BlobStartCopyFromURLResponse) Version() string { + return bscfur.rawResponse.Header.Get("x-ms-version") +} + +// BlobUndeleteResponse ... +type BlobUndeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bur BlobUndeleteResponse) Response() *http.Response { + return bur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bur BlobUndeleteResponse) StatusCode() int { + return bur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bur BlobUndeleteResponse) Status() string { + return bur.rawResponse.Status +} + +// Date returns the value for header Date. +func (bur BlobUndeleteResponse) Date() time.Time { + s := bur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bur BlobUndeleteResponse) ErrorCode() string { + return bur.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bur BlobUndeleteResponse) RequestID() string { + return bur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bur BlobUndeleteResponse) Version() string { + return bur.rawResponse.Header.Get("x-ms-version") +} + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block struct { + // Name - The base64 encoded block ID. + Name string `xml:"Name"` + // Size - The block size in bytes. + Size int32 `xml:"Size"` +} + +// BlockBlobCommitBlockListResponse ... +type BlockBlobCommitBlockListResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbcblr BlockBlobCommitBlockListResponse) Response() *http.Response { + return bbcblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbcblr BlockBlobCommitBlockListResponse) StatusCode() int { + return bbcblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". 
+func (bbcblr BlockBlobCommitBlockListResponse) Status() string { + return bbcblr.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbcblr BlockBlobCommitBlockListResponse) ContentMD5() []byte { + s := bbcblr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// Date returns the value for header Date. +func (bbcblr BlockBlobCommitBlockListResponse) Date() time.Time { + s := bbcblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbcblr BlockBlobCommitBlockListResponse) ErrorCode() string { + return bbcblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bbcblr BlockBlobCommitBlockListResponse) ETag() ETag { + return ETag(bbcblr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbcblr BlockBlobCommitBlockListResponse) IsServerEncrypted() string { + return bbcblr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bbcblr BlockBlobCommitBlockListResponse) LastModified() time.Time { + s := bbcblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bbcblr BlockBlobCommitBlockListResponse) RequestID() string { + return bbcblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbcblr BlockBlobCommitBlockListResponse) Version() string { + return bbcblr.rawResponse.Header.Get("x-ms-version") +} + +// BlockBlobStageBlockFromURLResponse ... +type BlockBlobStageBlockFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbsbfur BlockBlobStageBlockFromURLResponse) Response() *http.Response { + return bbsbfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbsbfur BlockBlobStageBlockFromURLResponse) StatusCode() int { + return bbsbfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbsbfur BlockBlobStageBlockFromURLResponse) Status() string { + return bbsbfur.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbsbfur BlockBlobStageBlockFromURLResponse) ContentMD5() []byte { + s := bbsbfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// Date returns the value for header Date. +func (bbsbfur BlockBlobStageBlockFromURLResponse) Date() time.Time { + s := bbsbfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbsbfur BlockBlobStageBlockFromURLResponse) ErrorCode() string { + return bbsbfur.rawResponse.Header.Get("x-ms-error-code") +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. 
+func (bbsbfur BlockBlobStageBlockFromURLResponse) IsServerEncrypted() string { + return bbsbfur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// RequestID returns the value for header x-ms-request-id. +func (bbsbfur BlockBlobStageBlockFromURLResponse) RequestID() string { + return bbsbfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbsbfur BlockBlobStageBlockFromURLResponse) Version() string { + return bbsbfur.rawResponse.Header.Get("x-ms-version") +} + +// BlockBlobStageBlockResponse ... +type BlockBlobStageBlockResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbsbr BlockBlobStageBlockResponse) Response() *http.Response { + return bbsbr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbsbr BlockBlobStageBlockResponse) StatusCode() int { + return bbsbr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbsbr BlockBlobStageBlockResponse) Status() string { + return bbsbr.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbsbr BlockBlobStageBlockResponse) ContentMD5() []byte { + s := bbsbr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// Date returns the value for header Date. +func (bbsbr BlockBlobStageBlockResponse) Date() time.Time { + s := bbsbr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbsbr BlockBlobStageBlockResponse) ErrorCode() string { + return bbsbr.rawResponse.Header.Get("x-ms-error-code") +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbsbr BlockBlobStageBlockResponse) IsServerEncrypted() string { + return bbsbr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// RequestID returns the value for header x-ms-request-id. +func (bbsbr BlockBlobStageBlockResponse) RequestID() string { + return bbsbr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbsbr BlockBlobStageBlockResponse) Version() string { + return bbsbr.rawResponse.Header.Get("x-ms-version") +} + +// BlockBlobUploadResponse ... +type BlockBlobUploadResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbur BlockBlobUploadResponse) Response() *http.Response { + return bbur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbur BlockBlobUploadResponse) StatusCode() int { + return bbur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbur BlockBlobUploadResponse) Status() string { + return bbur.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbur BlockBlobUploadResponse) ContentMD5() []byte { + s := bbur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// Date returns the value for header Date. 
+func (bbur BlockBlobUploadResponse) Date() time.Time { + s := bbur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbur BlockBlobUploadResponse) ErrorCode() string { + return bbur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bbur BlockBlobUploadResponse) ETag() ETag { + return ETag(bbur.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbur BlockBlobUploadResponse) IsServerEncrypted() string { + return bbur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bbur BlockBlobUploadResponse) LastModified() time.Time { + s := bbur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bbur BlockBlobUploadResponse) RequestID() string { + return bbur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbur BlockBlobUploadResponse) Version() string { + return bbur.rawResponse.Header.Get("x-ms-version") +} + +// BlockList ... +type BlockList struct { + rawResponse *http.Response + CommittedBlocks []Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks []Block `xml:"UncommittedBlocks>Block"` +} + +// Response returns the raw HTTP response object. +func (bl BlockList) Response() *http.Response { + return bl.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bl BlockList) StatusCode() int { + return bl.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bl BlockList) Status() string { + return bl.rawResponse.Status +} + +// BlobContentLength returns the value for header x-ms-blob-content-length. +func (bl BlockList) BlobContentLength() int64 { + s := bl.rawResponse.Header.Get("x-ms-blob-content-length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// ContentType returns the value for header Content-Type. +func (bl BlockList) ContentType() string { + return bl.rawResponse.Header.Get("Content-Type") +} + +// Date returns the value for header Date. +func (bl BlockList) Date() time.Time { + s := bl.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bl BlockList) ErrorCode() string { + return bl.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bl BlockList) ETag() ETag { + return ETag(bl.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bl BlockList) LastModified() time.Time { + s := bl.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. 
+func (bl BlockList) RequestID() string { + return bl.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bl BlockList) Version() string { + return bl.rawResponse.Header.Get("x-ms-version") +} + +// BlockLookupList ... +type BlockLookupList struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"BlockList"` + Committed []string `xml:"Committed"` + Uncommitted []string `xml:"Uncommitted"` + Latest []string `xml:"Latest"` +} + +// ClearRange ... +type ClearRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +// ContainerAcquireLeaseResponse ... +type ContainerAcquireLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (calr ContainerAcquireLeaseResponse) Response() *http.Response { + return calr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (calr ContainerAcquireLeaseResponse) StatusCode() int { + return calr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (calr ContainerAcquireLeaseResponse) Status() string { + return calr.rawResponse.Status +} + +// Date returns the value for header Date. +func (calr ContainerAcquireLeaseResponse) Date() time.Time { + s := calr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (calr ContainerAcquireLeaseResponse) ErrorCode() string { + return calr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (calr ContainerAcquireLeaseResponse) ETag() ETag { + return ETag(calr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (calr ContainerAcquireLeaseResponse) LastModified() time.Time { + s := calr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (calr ContainerAcquireLeaseResponse) LeaseID() string { + return calr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (calr ContainerAcquireLeaseResponse) RequestID() string { + return calr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (calr ContainerAcquireLeaseResponse) Version() string { + return calr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerBreakLeaseResponse ... +type ContainerBreakLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cblr ContainerBreakLeaseResponse) Response() *http.Response { + return cblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cblr ContainerBreakLeaseResponse) StatusCode() int { + return cblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cblr ContainerBreakLeaseResponse) Status() string { + return cblr.rawResponse.Status +} + +// Date returns the value for header Date. 
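Block, BlockList and BlockLookupList above together describe the block-blob protocol: blocks are staged under base64-encoded IDs, and a commit request sends a BlockLookupList naming which committed, uncommitted, or latest-staged versions to assemble into the blob. As a rough illustration, the XML body of such a commit is just this struct serialized; the sketch redeclares the type locally only so it compiles on its own.

package main

import (
	"encoding/base64"
	"encoding/xml"
	"fmt"
)

// blockLookupList is a local copy of BlockLookupList, redeclared here so the
// sketch stands alone.
type blockLookupList struct {
	XMLName     xml.Name `xml:"BlockList"`
	Committed   []string `xml:"Committed"`
	Uncommitted []string `xml:"Uncommitted"`
	Latest      []string `xml:"Latest"`
}

func main() {
	// Block IDs are base64-encoded strings; within one blob all IDs must have
	// the same length, which is why fixed-width names are typically encoded.
	id := func(n string) string { return base64.StdEncoding.EncodeToString([]byte(n)) }
	body, _ := xml.MarshalIndent(blockLookupList{
		Latest: []string{id("block-00000"), id("block-00001")},
	}, "", "  ")
	fmt.Println(string(body))
}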
+func (cblr ContainerBreakLeaseResponse) Date() time.Time { + s := cblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cblr ContainerBreakLeaseResponse) ErrorCode() string { + return cblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cblr ContainerBreakLeaseResponse) ETag() ETag { + return ETag(cblr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (cblr ContainerBreakLeaseResponse) LastModified() time.Time { + s := cblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseTime returns the value for header x-ms-lease-time. +func (cblr ContainerBreakLeaseResponse) LeaseTime() int32 { + s := cblr.rawResponse.Header.Get("x-ms-lease-time") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + panic(err) + } + return int32(i) +} + +// RequestID returns the value for header x-ms-request-id. +func (cblr ContainerBreakLeaseResponse) RequestID() string { + return cblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cblr ContainerBreakLeaseResponse) Version() string { + return cblr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerChangeLeaseResponse ... +type ContainerChangeLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cclr ContainerChangeLeaseResponse) Response() *http.Response { + return cclr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cclr ContainerChangeLeaseResponse) StatusCode() int { + return cclr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cclr ContainerChangeLeaseResponse) Status() string { + return cclr.rawResponse.Status +} + +// Date returns the value for header Date. +func (cclr ContainerChangeLeaseResponse) Date() time.Time { + s := cclr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cclr ContainerChangeLeaseResponse) ErrorCode() string { + return cclr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cclr ContainerChangeLeaseResponse) ETag() ETag { + return ETag(cclr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (cclr ContainerChangeLeaseResponse) LastModified() time.Time { + s := cclr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (cclr ContainerChangeLeaseResponse) LeaseID() string { + return cclr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (cclr ContainerChangeLeaseResponse) RequestID() string { + return cclr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (cclr ContainerChangeLeaseResponse) Version() string { + return cclr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerCreateResponse ... +type ContainerCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ccr ContainerCreateResponse) Response() *http.Response { + return ccr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ccr ContainerCreateResponse) StatusCode() int { + return ccr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ccr ContainerCreateResponse) Status() string { + return ccr.rawResponse.Status +} + +// Date returns the value for header Date. +func (ccr ContainerCreateResponse) Date() time.Time { + s := ccr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ccr ContainerCreateResponse) ErrorCode() string { + return ccr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (ccr ContainerCreateResponse) ETag() ETag { + return ETag(ccr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (ccr ContainerCreateResponse) LastModified() time.Time { + s := ccr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (ccr ContainerCreateResponse) RequestID() string { + return ccr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ccr ContainerCreateResponse) Version() string { + return ccr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerDeleteResponse ... +type ContainerDeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cdr ContainerDeleteResponse) Response() *http.Response { + return cdr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cdr ContainerDeleteResponse) StatusCode() int { + return cdr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cdr ContainerDeleteResponse) Status() string { + return cdr.rawResponse.Status +} + +// Date returns the value for header Date. +func (cdr ContainerDeleteResponse) Date() time.Time { + s := cdr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cdr ContainerDeleteResponse) ErrorCode() string { + return cdr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (cdr ContainerDeleteResponse) RequestID() string { + return cdr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cdr ContainerDeleteResponse) Version() string { + return cdr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerGetPropertiesResponse ... +type ContainerGetPropertiesResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. 
+func (cgpr ContainerGetPropertiesResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range cgpr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (cgpr ContainerGetPropertiesResponse) Response() *http.Response { + return cgpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cgpr ContainerGetPropertiesResponse) StatusCode() int { + return cgpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cgpr ContainerGetPropertiesResponse) Status() string { + return cgpr.rawResponse.Status +} + +// BlobPublicAccess returns the value for header x-ms-blob-public-access. +func (cgpr ContainerGetPropertiesResponse) BlobPublicAccess() PublicAccessType { + return PublicAccessType(cgpr.rawResponse.Header.Get("x-ms-blob-public-access")) +} + +// Date returns the value for header Date. +func (cgpr ContainerGetPropertiesResponse) Date() time.Time { + s := cgpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cgpr ContainerGetPropertiesResponse) ErrorCode() string { + return cgpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cgpr ContainerGetPropertiesResponse) ETag() ETag { + return ETag(cgpr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (cgpr ContainerGetPropertiesResponse) LastModified() time.Time { + s := cgpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (cgpr ContainerGetPropertiesResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(cgpr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (cgpr ContainerGetPropertiesResponse) LeaseState() LeaseStateType { + return LeaseStateType(cgpr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (cgpr ContainerGetPropertiesResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(cgpr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// RequestID returns the value for header x-ms-request-id. +func (cgpr ContainerGetPropertiesResponse) RequestID() string { + return cgpr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
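NewMetadata, here and on the other response types, builds the user-defined metadata map by filtering response headers on the package's unexported metadata prefix constant. A self-contained equivalent is sketched below; the prefix value x-ms-meta- is an assumption, since the constant itself is defined elsewhere in the package.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

const mdPrefix = "x-ms-meta-" // assumed value of the package's unexported constant

// metadataFrom mirrors NewMetadata: keep headers carrying the metadata
// prefix (case-insensitively), strip the prefix, lower-case the key, and
// take the first value.
func metadataFrom(h http.Header) map[string]string {
	md := map[string]string{}
	for k, v := range h {
		if len(k) > len(mdPrefix) && strings.EqualFold(k[:len(mdPrefix)], mdPrefix) {
			md[strings.ToLower(k[len(mdPrefix):])] = v[0]
		}
	}
	return md
}

func main() {
	h := http.Header{}
	h.Set("x-ms-meta-Owner", "alice") // canonicalized to X-Ms-Meta-Owner
	h.Set("Content-Type", "text/plain")
	fmt.Println(metadataFrom(h)) // map[owner:alice]
}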
+func (cgpr ContainerGetPropertiesResponse) Version() string { + return cgpr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerItem - An Azure Storage container +type ContainerItem struct { + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + Metadata Metadata `xml:"Metadata"` +} + +// ContainerProperties - Properties of a container +type ContainerProperties struct { + LastModified time.Time `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + // LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone' + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + // LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone' + LeaseState LeaseStateType `xml:"LeaseState"` + // LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone' + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + // PublicAccess - Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone' + PublicAccess PublicAccessType `xml:"PublicAccess"` +} + +// MarshalXML implements the xml.Marshaler interface for ContainerProperties. +func (cp ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if reflect.TypeOf((*ContainerProperties)(nil)).Elem().Size() != reflect.TypeOf((*containerProperties)(nil)).Elem().Size() { + panic("size mismatch between ContainerProperties and containerProperties") + } + cp2 := (*containerProperties)(unsafe.Pointer(&cp)) + return e.EncodeElement(*cp2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for ContainerProperties. +func (cp *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + if reflect.TypeOf((*ContainerProperties)(nil)).Elem().Size() != reflect.TypeOf((*containerProperties)(nil)).Elem().Size() { + panic("size mismatch between ContainerProperties and containerProperties") + } + cp2 := (*containerProperties)(unsafe.Pointer(cp)) + return d.DecodeElement(cp2, &start) +} + +// ContainerReleaseLeaseResponse ... +type ContainerReleaseLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (crlr ContainerReleaseLeaseResponse) Response() *http.Response { + return crlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crlr ContainerReleaseLeaseResponse) StatusCode() int { + return crlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crlr ContainerReleaseLeaseResponse) Status() string { + return crlr.rawResponse.Status +} + +// Date returns the value for header Date. +func (crlr ContainerReleaseLeaseResponse) Date() time.Time { + s := crlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crlr ContainerReleaseLeaseResponse) ErrorCode() string { + return crlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (crlr ContainerReleaseLeaseResponse) ETag() ETag { + return ETag(crlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. 
+func (crlr ContainerReleaseLeaseResponse) LastModified() time.Time { + s := crlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (crlr ContainerReleaseLeaseResponse) RequestID() string { + return crlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crlr ContainerReleaseLeaseResponse) Version() string { + return crlr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerRenewLeaseResponse ... +type ContainerRenewLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (crlr ContainerRenewLeaseResponse) Response() *http.Response { + return crlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crlr ContainerRenewLeaseResponse) StatusCode() int { + return crlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crlr ContainerRenewLeaseResponse) Status() string { + return crlr.rawResponse.Status +} + +// Date returns the value for header Date. +func (crlr ContainerRenewLeaseResponse) Date() time.Time { + s := crlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crlr ContainerRenewLeaseResponse) ErrorCode() string { + return crlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (crlr ContainerRenewLeaseResponse) ETag() ETag { + return ETag(crlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (crlr ContainerRenewLeaseResponse) LastModified() time.Time { + s := crlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (crlr ContainerRenewLeaseResponse) LeaseID() string { + return crlr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (crlr ContainerRenewLeaseResponse) RequestID() string { + return crlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crlr ContainerRenewLeaseResponse) Version() string { + return crlr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerSetAccessPolicyResponse ... +type ContainerSetAccessPolicyResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (csapr ContainerSetAccessPolicyResponse) Response() *http.Response { + return csapr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (csapr ContainerSetAccessPolicyResponse) StatusCode() int { + return csapr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (csapr ContainerSetAccessPolicyResponse) Status() string { + return csapr.rawResponse.Status +} + +// Date returns the value for header Date. 
+func (csapr ContainerSetAccessPolicyResponse) Date() time.Time { + s := csapr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (csapr ContainerSetAccessPolicyResponse) ErrorCode() string { + return csapr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (csapr ContainerSetAccessPolicyResponse) ETag() ETag { + return ETag(csapr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (csapr ContainerSetAccessPolicyResponse) LastModified() time.Time { + s := csapr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (csapr ContainerSetAccessPolicyResponse) RequestID() string { + return csapr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (csapr ContainerSetAccessPolicyResponse) Version() string { + return csapr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerSetMetadataResponse ... +type ContainerSetMetadataResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (csmr ContainerSetMetadataResponse) Response() *http.Response { + return csmr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (csmr ContainerSetMetadataResponse) StatusCode() int { + return csmr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (csmr ContainerSetMetadataResponse) Status() string { + return csmr.rawResponse.Status +} + +// Date returns the value for header Date. +func (csmr ContainerSetMetadataResponse) Date() time.Time { + s := csmr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (csmr ContainerSetMetadataResponse) ErrorCode() string { + return csmr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (csmr ContainerSetMetadataResponse) ETag() ETag { + return ETag(csmr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (csmr ContainerSetMetadataResponse) LastModified() time.Time { + s := csmr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (csmr ContainerSetMetadataResponse) RequestID() string { + return csmr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (csmr ContainerSetMetadataResponse) Version() string { + return csmr.rawResponse.Header.Get("x-ms-version") +} + +// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access +// resources in another domain. 
Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain +// (the origin domain) to call APIs in another domain +type CorsRule struct { + // AllowedOrigins - The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS. + AllowedOrigins string `xml:"AllowedOrigins"` + // AllowedMethods - The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) + AllowedMethods string `xml:"AllowedMethods"` + // AllowedHeaders - The request headers that the origin domain may specify on the CORS request. + AllowedHeaders string `xml:"AllowedHeaders"` + // ExposedHeaders - The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer. + ExposedHeaders string `xml:"ExposedHeaders"` + // MaxAgeInSeconds - The maximum amount of time that a browser should cache the preflight OPTIONS request. + MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"` +} + +// downloadResponse ... +type downloadResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (dr downloadResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range dr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (dr downloadResponse) Response() *http.Response { + return dr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (dr downloadResponse) StatusCode() int { + return dr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (dr downloadResponse) Status() string { + return dr.rawResponse.Status +} + +// Body returns the raw HTTP response object's Body. +func (dr downloadResponse) Body() io.ReadCloser { + return dr.rawResponse.Body +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (dr downloadResponse) AcceptRanges() string { + return dr.rawResponse.Header.Get("Accept-Ranges") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (dr downloadResponse) BlobCommittedBlockCount() int32 { + s := dr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + panic(err) + } + return int32(i) +} + +// BlobContentMD5 returns the value for header x-ms-blob-content-md5. +func (dr downloadResponse) BlobContentMD5() []byte { + s := dr.rawResponse.Header.Get("x-ms-blob-content-md5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. 
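Referring back to the CorsRule type above: a rule is ultimately serialized into the service-properties payload (as part of a CORS rule list defined elsewhere in this file). A small, self-contained sketch of building and encoding one rule, with a locally redeclared copy of the type and example values chosen only for illustration:

package main

import (
	"encoding/xml"
	"fmt"
)

// corsRule is a local copy of CorsRule, redeclared so the sketch stands alone.
type corsRule struct {
	AllowedOrigins  string `xml:"AllowedOrigins"`
	AllowedMethods  string `xml:"AllowedMethods"`
	AllowedHeaders  string `xml:"AllowedHeaders"`
	ExposedHeaders  string `xml:"ExposedHeaders"`
	MaxAgeInSeconds int32  `xml:"MaxAgeInSeconds"`
}

func main() {
	// One rule allowing browser reads from a single origin; multiple values
	// within a field are comma separated, per the field comments above.
	rule := corsRule{
		AllowedOrigins:  "https://example.com",
		AllowedMethods:  "GET,HEAD",
		AllowedHeaders:  "x-ms-meta-*",
		ExposedHeaders:  "x-ms-request-id",
		MaxAgeInSeconds: 3600,
	}
	out, _ := xml.MarshalIndent(rule, "", "  ")
	fmt.Println(string(out))
}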
+func (dr downloadResponse) BlobSequenceNumber() int64 { + s := dr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (dr downloadResponse) BlobType() BlobType { + return BlobType(dr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. +func (dr downloadResponse) CacheControl() string { + return dr.rawResponse.Header.Get("Cache-Control") +} + +// ContentDisposition returns the value for header Content-Disposition. +func (dr downloadResponse) ContentDisposition() string { + return dr.rawResponse.Header.Get("Content-Disposition") +} + +// ContentEncoding returns the value for header Content-Encoding. +func (dr downloadResponse) ContentEncoding() string { + return dr.rawResponse.Header.Get("Content-Encoding") +} + +// ContentLanguage returns the value for header Content-Language. +func (dr downloadResponse) ContentLanguage() string { + return dr.rawResponse.Header.Get("Content-Language") +} + +// ContentLength returns the value for header Content-Length. +func (dr downloadResponse) ContentLength() int64 { + s := dr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (dr downloadResponse) ContentMD5() []byte { + s := dr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// ContentRange returns the value for header Content-Range. +func (dr downloadResponse) ContentRange() string { + return dr.rawResponse.Header.Get("Content-Range") +} + +// ContentType returns the value for header Content-Type. +func (dr downloadResponse) ContentType() string { + return dr.rawResponse.Header.Get("Content-Type") +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (dr downloadResponse) CopyCompletionTime() time.Time { + s := dr.rawResponse.Header.Get("x-ms-copy-completion-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// CopyID returns the value for header x-ms-copy-id. +func (dr downloadResponse) CopyID() string { + return dr.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (dr downloadResponse) CopyProgress() string { + return dr.rawResponse.Header.Get("x-ms-copy-progress") +} + +// CopySource returns the value for header x-ms-copy-source. +func (dr downloadResponse) CopySource() string { + return dr.rawResponse.Header.Get("x-ms-copy-source") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (dr downloadResponse) CopyStatus() CopyStatusType { + return CopyStatusType(dr.rawResponse.Header.Get("x-ms-copy-status")) +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (dr downloadResponse) CopyStatusDescription() string { + return dr.rawResponse.Header.Get("x-ms-copy-status-description") +} + +// Date returns the value for header Date. 
+func (dr downloadResponse) Date() time.Time { + s := dr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (dr downloadResponse) ErrorCode() string { + return dr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (dr downloadResponse) ETag() ETag { + return ETag(dr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (dr downloadResponse) IsServerEncrypted() string { + return dr.rawResponse.Header.Get("x-ms-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (dr downloadResponse) LastModified() time.Time { + s := dr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (dr downloadResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(dr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (dr downloadResponse) LeaseState() LeaseStateType { + return LeaseStateType(dr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (dr downloadResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(dr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// RequestID returns the value for header x-ms-request-id. +func (dr downloadResponse) RequestID() string { + return dr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (dr downloadResponse) Version() string { + return dr.rawResponse.Header.Get("x-ms-version") +} + +// GeoReplication ... +type GeoReplication struct { + // Status - The status of the secondary location. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable', 'GeoReplicationStatusNone' + Status GeoReplicationStatusType `xml:"Status"` + // LastSyncTime - A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available for read operations at the secondary. Primary writes after this point in time may or may not be available for reads. + LastSyncTime time.Time `xml:"LastSyncTime"` +} + +// MarshalXML implements the xml.Marshaler interface for GeoReplication. +func (gr GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if reflect.TypeOf((*GeoReplication)(nil)).Elem().Size() != reflect.TypeOf((*geoReplication)(nil)).Elem().Size() { + panic("size mismatch between GeoReplication and geoReplication") + } + gr2 := (*geoReplication)(unsafe.Pointer(&gr)) + return e.EncodeElement(*gr2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for GeoReplication. 
+func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + if reflect.TypeOf((*GeoReplication)(nil)).Elem().Size() != reflect.TypeOf((*geoReplication)(nil)).Elem().Size() { + panic("size mismatch between GeoReplication and geoReplication") + } + gr2 := (*geoReplication)(unsafe.Pointer(gr)) + return d.DecodeElement(gr2, &start) +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + ContainerName string `xml:"ContainerName,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + MaxResults int32 `xml:"MaxResults"` + Delimiter string `xml:"Delimiter"` + Segment BlobFlatList `xml:"Blobs"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lbfsr ListBlobsFlatSegmentResponse) Response() *http.Response { + return lbfsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lbfsr ListBlobsFlatSegmentResponse) StatusCode() int { + return lbfsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lbfsr ListBlobsFlatSegmentResponse) Status() string { + return lbfsr.rawResponse.Status +} + +// ContentType returns the value for header Content-Type. +func (lbfsr ListBlobsFlatSegmentResponse) ContentType() string { + return lbfsr.rawResponse.Header.Get("Content-Type") +} + +// Date returns the value for header Date. +func (lbfsr ListBlobsFlatSegmentResponse) Date() time.Time { + s := lbfsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (lbfsr ListBlobsFlatSegmentResponse) ErrorCode() string { + return lbfsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lbfsr ListBlobsFlatSegmentResponse) RequestID() string { + return lbfsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lbfsr ListBlobsFlatSegmentResponse) Version() string { + return lbfsr.rawResponse.Header.Get("x-ms-version") +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + ContainerName string `xml:"ContainerName,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + MaxResults int32 `xml:"MaxResults"` + Delimiter string `xml:"Delimiter"` + Segment BlobHierarchyList `xml:"Blobs"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lbhsr ListBlobsHierarchySegmentResponse) Response() *http.Response { + return lbhsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lbhsr ListBlobsHierarchySegmentResponse) StatusCode() int { + return lbhsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". 
+func (lbhsr ListBlobsHierarchySegmentResponse) Status() string { + return lbhsr.rawResponse.Status +} + +// ContentType returns the value for header Content-Type. +func (lbhsr ListBlobsHierarchySegmentResponse) ContentType() string { + return lbhsr.rawResponse.Header.Get("Content-Type") +} + +// Date returns the value for header Date. +func (lbhsr ListBlobsHierarchySegmentResponse) Date() time.Time { + s := lbhsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (lbhsr ListBlobsHierarchySegmentResponse) ErrorCode() string { + return lbhsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lbhsr ListBlobsHierarchySegmentResponse) RequestID() string { + return lbhsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lbhsr ListBlobsHierarchySegmentResponse) Version() string { + return lbhsr.rawResponse.Header.Get("x-ms-version") +} + +// ListContainersResponse - An enumeration of containers +type ListContainersResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + Prefix string `xml:"Prefix"` + Marker *string `xml:"Marker"` + MaxResults int32 `xml:"MaxResults"` + ContainerItems []ContainerItem `xml:"Containers>Container"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lcr ListContainersResponse) Response() *http.Response { + return lcr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lcr ListContainersResponse) StatusCode() int { + return lcr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lcr ListContainersResponse) Status() string { + return lcr.rawResponse.Status +} + +// ErrorCode returns the value for header x-ms-error-code. +func (lcr ListContainersResponse) ErrorCode() string { + return lcr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lcr ListContainersResponse) RequestID() string { + return lcr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lcr ListContainersResponse) Version() string { + return lcr.rawResponse.Header.Get("x-ms-version") +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // Version - The version of Storage Analytics to configure. + Version string `xml:"Version"` + // Delete - Indicates whether all delete requests should be logged. + Delete bool `xml:"Delete"` + // Read - Indicates whether all read requests should be logged. + Read bool `xml:"Read"` + // Write - Indicates whether all write requests should be logged. + Write bool `xml:"Write"` + RetentionPolicy RetentionPolicy `xml:"RetentionPolicy"` +} + +// Metrics ... +type Metrics struct { + // Version - The version of Storage Analytics to configure. + Version *string `xml:"Version"` + // Enabled - Indicates whether metrics are enabled for the Blob service. + Enabled bool `xml:"Enabled"` + // IncludeAPIs - Indicates whether metrics should generate summary statistics for called API operations. 
+ IncludeAPIs *bool `xml:"IncludeAPIs"` + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` +} + +// PageBlobClearPagesResponse ... +type PageBlobClearPagesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcpr PageBlobClearPagesResponse) Response() *http.Response { + return pbcpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcpr PageBlobClearPagesResponse) StatusCode() int { + return pbcpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcpr PageBlobClearPagesResponse) Status() string { + return pbcpr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbcpr PageBlobClearPagesResponse) BlobSequenceNumber() int64 { + s := pbcpr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbcpr PageBlobClearPagesResponse) ContentMD5() []byte { + s := pbcpr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// Date returns the value for header Date. +func (pbcpr PageBlobClearPagesResponse) Date() time.Time { + s := pbcpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcpr PageBlobClearPagesResponse) ErrorCode() string { + return pbcpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbcpr PageBlobClearPagesResponse) ETag() ETag { + return ETag(pbcpr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbcpr PageBlobClearPagesResponse) LastModified() time.Time { + s := pbcpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbcpr PageBlobClearPagesResponse) RequestID() string { + return pbcpr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcpr PageBlobClearPagesResponse) Version() string { + return pbcpr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobCopyIncrementalResponse ... +type PageBlobCopyIncrementalResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcir PageBlobCopyIncrementalResponse) Response() *http.Response { + return pbcir.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcir PageBlobCopyIncrementalResponse) StatusCode() int { + return pbcir.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcir PageBlobCopyIncrementalResponse) Status() string { + return pbcir.rawResponse.Status +} + +// CopyID returns the value for header x-ms-copy-id. +func (pbcir PageBlobCopyIncrementalResponse) CopyID() string { + return pbcir.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyStatus returns the value for header x-ms-copy-status. 
+func (pbcir PageBlobCopyIncrementalResponse) CopyStatus() CopyStatusType { + return CopyStatusType(pbcir.rawResponse.Header.Get("x-ms-copy-status")) +} + +// Date returns the value for header Date. +func (pbcir PageBlobCopyIncrementalResponse) Date() time.Time { + s := pbcir.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcir PageBlobCopyIncrementalResponse) ErrorCode() string { + return pbcir.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbcir PageBlobCopyIncrementalResponse) ETag() ETag { + return ETag(pbcir.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbcir PageBlobCopyIncrementalResponse) LastModified() time.Time { + s := pbcir.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbcir PageBlobCopyIncrementalResponse) RequestID() string { + return pbcir.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcir PageBlobCopyIncrementalResponse) Version() string { + return pbcir.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobCreateResponse ... +type PageBlobCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcr PageBlobCreateResponse) Response() *http.Response { + return pbcr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcr PageBlobCreateResponse) StatusCode() int { + return pbcr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcr PageBlobCreateResponse) Status() string { + return pbcr.rawResponse.Status +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbcr PageBlobCreateResponse) ContentMD5() []byte { + s := pbcr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// Date returns the value for header Date. +func (pbcr PageBlobCreateResponse) Date() time.Time { + s := pbcr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcr PageBlobCreateResponse) ErrorCode() string { + return pbcr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbcr PageBlobCreateResponse) ETag() ETag { + return ETag(pbcr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (pbcr PageBlobCreateResponse) IsServerEncrypted() string { + return pbcr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (pbcr PageBlobCreateResponse) LastModified() time.Time { + s := pbcr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. 
+func (pbcr PageBlobCreateResponse) RequestID() string { + return pbcr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcr PageBlobCreateResponse) Version() string { + return pbcr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobResizeResponse ... +type PageBlobResizeResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbrr PageBlobResizeResponse) Response() *http.Response { + return pbrr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbrr PageBlobResizeResponse) StatusCode() int { + return pbrr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbrr PageBlobResizeResponse) Status() string { + return pbrr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbrr PageBlobResizeResponse) BlobSequenceNumber() int64 { + s := pbrr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// Date returns the value for header Date. +func (pbrr PageBlobResizeResponse) Date() time.Time { + s := pbrr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbrr PageBlobResizeResponse) ErrorCode() string { + return pbrr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbrr PageBlobResizeResponse) ETag() ETag { + return ETag(pbrr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbrr PageBlobResizeResponse) LastModified() time.Time { + s := pbrr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbrr PageBlobResizeResponse) RequestID() string { + return pbrr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbrr PageBlobResizeResponse) Version() string { + return pbrr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobUpdateSequenceNumberResponse ... +type PageBlobUpdateSequenceNumberResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Response() *http.Response { + return pbusnr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbusnr PageBlobUpdateSequenceNumberResponse) StatusCode() int { + return pbusnr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbusnr PageBlobUpdateSequenceNumberResponse) Status() string { + return pbusnr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbusnr PageBlobUpdateSequenceNumberResponse) BlobSequenceNumber() int64 { + s := pbusnr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// Date returns the value for header Date. 
+func (pbusnr PageBlobUpdateSequenceNumberResponse) Date() time.Time { + s := pbusnr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbusnr PageBlobUpdateSequenceNumberResponse) ErrorCode() string { + return pbusnr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbusnr PageBlobUpdateSequenceNumberResponse) ETag() ETag { + return ETag(pbusnr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbusnr PageBlobUpdateSequenceNumberResponse) LastModified() time.Time { + s := pbusnr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbusnr PageBlobUpdateSequenceNumberResponse) RequestID() string { + return pbusnr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Version() string { + return pbusnr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobUploadPagesResponse ... +type PageBlobUploadPagesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbupr PageBlobUploadPagesResponse) Response() *http.Response { + return pbupr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbupr PageBlobUploadPagesResponse) StatusCode() int { + return pbupr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbupr PageBlobUploadPagesResponse) Status() string { + return pbupr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbupr PageBlobUploadPagesResponse) BlobSequenceNumber() int64 { + s := pbupr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbupr PageBlobUploadPagesResponse) ContentMD5() []byte { + s := pbupr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// Date returns the value for header Date. +func (pbupr PageBlobUploadPagesResponse) Date() time.Time { + s := pbupr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbupr PageBlobUploadPagesResponse) ErrorCode() string { + return pbupr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbupr PageBlobUploadPagesResponse) ETag() ETag { + return ETag(pbupr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (pbupr PageBlobUploadPagesResponse) IsServerEncrypted() string { + return pbupr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. 
+func (pbupr PageBlobUploadPagesResponse) LastModified() time.Time { + s := pbupr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbupr PageBlobUploadPagesResponse) RequestID() string { + return pbupr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbupr PageBlobUploadPagesResponse) Version() string { + return pbupr.rawResponse.Header.Get("x-ms-version") +} + +// PageList - the list of pages +type PageList struct { + rawResponse *http.Response + PageRange []PageRange `xml:"PageRange"` + ClearRange []ClearRange `xml:"ClearRange"` +} + +// Response returns the raw HTTP response object. +func (pl PageList) Response() *http.Response { + return pl.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pl PageList) StatusCode() int { + return pl.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pl PageList) Status() string { + return pl.rawResponse.Status +} + +// BlobContentLength returns the value for header x-ms-blob-content-length. +func (pl PageList) BlobContentLength() int64 { + s := pl.rawResponse.Header.Get("x-ms-blob-content-length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// Date returns the value for header Date. +func (pl PageList) Date() time.Time { + s := pl.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pl PageList) ErrorCode() string { + return pl.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pl PageList) ETag() ETag { + return ETag(pl.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pl PageList) LastModified() time.Time { + s := pl.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pl PageList) RequestID() string { + return pl.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pl PageList) Version() string { + return pl.rawResponse.Header.Get("x-ms-version") +} + +// PageRange ... +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +// RetentionPolicy - the retention policy +type RetentionPolicy struct { + // Enabled - Indicates whether a retention policy is enabled for the storage service + Enabled bool `xml:"Enabled"` + // Days - Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this value will be deleted + Days *int32 `xml:"Days"` +} + +// ServiceSetPropertiesResponse ... +type ServiceSetPropertiesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (sspr ServiceSetPropertiesResponse) Response() *http.Response { + return sspr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. 
+func (sspr ServiceSetPropertiesResponse) StatusCode() int { + return sspr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sspr ServiceSetPropertiesResponse) Status() string { + return sspr.rawResponse.Status +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sspr ServiceSetPropertiesResponse) ErrorCode() string { + return sspr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sspr ServiceSetPropertiesResponse) RequestID() string { + return sspr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (sspr ServiceSetPropertiesResponse) Version() string { + return sspr.rawResponse.Header.Get("x-ms-version") +} + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // ID - a unique id + ID string `xml:"Id"` + // AccessPolicy - The access policy + AccessPolicy AccessPolicy `xml:"AccessPolicy"` +} + +// SignedIdentifiers ... +type SignedIdentifiers struct { + rawResponse *http.Response + Items []SignedIdentifier `xml:"SignedIdentifier"` +} + +// Response returns the raw HTTP response object. +func (si SignedIdentifiers) Response() *http.Response { + return si.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (si SignedIdentifiers) StatusCode() int { + return si.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (si SignedIdentifiers) Status() string { + return si.rawResponse.Status +} + +// BlobPublicAccess returns the value for header x-ms-blob-public-access. +func (si SignedIdentifiers) BlobPublicAccess() PublicAccessType { + return PublicAccessType(si.rawResponse.Header.Get("x-ms-blob-public-access")) +} + +// Date returns the value for header Date. +func (si SignedIdentifiers) Date() time.Time { + s := si.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (si SignedIdentifiers) ErrorCode() string { + return si.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (si SignedIdentifiers) ETag() ETag { + return ETag(si.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (si SignedIdentifiers) LastModified() time.Time { + s := si.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (si SignedIdentifiers) RequestID() string { + return si.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (si SignedIdentifiers) Version() string { + return si.rawResponse.Header.Get("x-ms-version") +} + +// StorageServiceProperties - Storage Service Properties. 
+type StorageServiceProperties struct { + rawResponse *http.Response + // Logging - Azure Analytics Logging settings + Logging *Logging `xml:"Logging"` + // HourMetrics - A summary of request statistics grouped by API in hourly aggregates for blobs + HourMetrics *Metrics `xml:"HourMetrics"` + // MinuteMetrics - a summary of request statistics grouped by API in minute aggregates for blobs + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + // Cors - The set of CORS rules. + Cors []CorsRule `xml:"Cors>CorsRule"` + // DefaultServiceVersion - The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + // DeleteRetentionPolicy - The Delete Retention Policy for the service + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` +} + +// Response returns the raw HTTP response object. +func (ssp StorageServiceProperties) Response() *http.Response { + return ssp.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ssp StorageServiceProperties) StatusCode() int { + return ssp.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ssp StorageServiceProperties) Status() string { + return ssp.rawResponse.Status +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ssp StorageServiceProperties) ErrorCode() string { + return ssp.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (ssp StorageServiceProperties) RequestID() string { + return ssp.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ssp StorageServiceProperties) Version() string { + return ssp.rawResponse.Header.Get("x-ms-version") +} + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats struct { + rawResponse *http.Response + // GeoReplication - Geo-Replication information for the Secondary Storage Service + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// Response returns the raw HTTP response object. +func (sss StorageServiceStats) Response() *http.Response { + return sss.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sss StorageServiceStats) StatusCode() int { + return sss.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sss StorageServiceStats) Status() string { + return sss.rawResponse.Status +} + +// Date returns the value for header Date. +func (sss StorageServiceStats) Date() time.Time { + s := sss.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + panic(err) + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sss StorageServiceStats) ErrorCode() string { + return sss.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sss StorageServiceStats) RequestID() string { + return sss.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (sss StorageServiceStats) Version() string { + return sss.rawResponse.Header.Get("x-ms-version") +} + +const ( + rfc3339Format = "2006-01-02T15:04:05.0000000Z07:00" +) + +// used to convert times from UTC to GMT before sending across the wire +var gmt = time.FixedZone("GMT", 0) + +// internal type used for marshalling time in RFC1123 format +type timeRFC1123 struct { + time.Time +} + +// MarshalText implements the encoding.TextMarshaler interface for timeRFC1123. +func (t timeRFC1123) MarshalText() ([]byte, error) { + return []byte(t.Format(time.RFC1123)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC1123. +func (t *timeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = time.Parse(time.RFC1123, string(data)) + return +} + +// internal type used for marshalling time in RFC3339 format +type timeRFC3339 struct { + time.Time +} + +// MarshalText implements the encoding.TextMarshaler interface for timeRFC3339. +func (t timeRFC3339) MarshalText() ([]byte, error) { + return []byte(t.Format(rfc3339Format)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC3339. +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + t.Time, err = time.Parse(rfc3339Format, string(data)) + return +} + +// internal type used for marshalling +type accessPolicy struct { + Start timeRFC3339 `xml:"Start"` + Expiry timeRFC3339 `xml:"Expiry"` + Permission string `xml:"Permission"` +} + +// internal type used for marshalling +type blobProperties struct { + LastModified timeRFC1123 `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + ContentLength *int64 `xml:"Content-Length"` + ContentType *string `xml:"Content-Type"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + ContentMD5 []byte `xml:"Content-MD5"` + ContentDisposition *string `xml:"Content-Disposition"` + CacheControl *string `xml:"Cache-Control"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType BlobType `xml:"BlobType"` + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + LeaseState LeaseStateType `xml:"LeaseState"` + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + CopyID *string `xml:"CopyId"` + CopyStatus CopyStatusType `xml:"CopyStatus"` + CopySource *string `xml:"CopySource"` + CopyProgress *string `xml:"CopyProgress"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + AccessTier AccessTierType `xml:"AccessTier"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` +} + +// internal type used for marshalling +type containerProperties struct { + LastModified timeRFC1123 `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + LeaseState LeaseStateType `xml:"LeaseState"` + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + PublicAccess PublicAccessType `xml:"PublicAccess"` +} + +// internal type used for marshalling +type geoReplication struct { + Status GeoReplicationStatusType `xml:"Status"` + LastSyncTime timeRFC1123 `xml:"LastSyncTime"` +} diff --git 
a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_page_blob.go new file mode 100644 index 000000000000..ce6507080e79 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_page_blob.go @@ -0,0 +1,794 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/base64" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// pageBlobClient is the client for the PageBlob methods of the Azblob service. +type pageBlobClient struct { + managementClient +} + +// newPageBlobClient creates an instance of the pageBlobClient client. +func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient { + return pageBlobClient{newManagementClient(url, p)} +} + +// ClearPages the Clear Pages operation clears a set of pages from a page blob +// +// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. +// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number +// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob +// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate +// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. 
+func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobClearPagesResponse), err +} + +// clearPagesPreparer prepares the ClearPages request. +func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "page") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifSequenceNumberLessThanOrEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) + } + if ifSequenceNumberLessThan != nil { + req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) + } + if ifSequenceNumberEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-page-write", "clear") + return req, nil +} + +// clearPagesResponder handles the response to the ClearPages request. 
+func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobClearPagesResponse{rawResponse: resp.Response()}, err +} + +// CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied snapshot are +// transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or +// copied from as usual. This API is supported since REST version 2016-05-31. +// +// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that +// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob +// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. 
+func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobCopyIncrementalResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: metadata, + constraints: []constraint{{target: "metadata", name: null, rule: false, + chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.copyIncrementalPreparer(copySource, timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobCopyIncrementalResponse), err +} + +// copyIncrementalPreparer prepares the CopyIncremental request. +func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "incrementalcopy") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-copy-source", copySource) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// copyIncrementalResponder handles the response to the CopyIncremental request. +func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobCopyIncrementalResponse{rawResponse: resp.Response()}, err +} + +// Create the Create operation creates a new page blob. +// +// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified, +// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the +// blob's content encoding. If specified, this property is stored with the blob and returned with a read request. 
+// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the +// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this +// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. +// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and +// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the +// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the +// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified +// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, +// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and +// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is +// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobContentLength is this +// header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte +// boundary. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can +// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. 
+func (client pageBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentLength *int64, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: metadata, + constraints: []constraint{{target: "metadata", name: null, rule: false, + chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobContentLength, blobSequenceNumber, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobCreateResponse), err +} + +// createPreparer prepares the Create request. +func (client pageBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentLength *int64, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + 
if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if blobContentLength != nil { + req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(*blobContentLength, 10)) + } + if blobSequenceNumber != nil { + req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-blob-type", "PageBlob") + return req, nil +} + +// createResponder handles the response to the Create request. +func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobCreateResponse{rawResponse: resp.Response()}, err +} + +// GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a +// page blob +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageList), err +} + +// getPageRangesPreparer prepares the GetPageRanges request. 
+func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "pagelist") + req.URL.RawQuery = params.Encode() + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPageRangesResponder handles the response to the GetPageRanges request. +func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &PageList{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to read response body") + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetPageRangesDiff [Update] The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob +// that were changed between target blob and previous snapshot. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot +// parameter is a DateTime value that specifies that the response will contain only pages that were changed between +// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a +// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots +// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes +// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the container's lease is +// active and matches this ID. 
ifModifiedSince is specify this header value to operate only on a blob if it has been +// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if +// it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only on blobs +// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageList), err +} + +// getPageRangesDiffPreparer prepares the GetPageRangesDiff request. +func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if prevsnapshot != nil && len(*prevsnapshot) > 0 { + params.Set("prevsnapshot", *prevsnapshot) + } + params.Set("comp", "pagelist") + req.URL.RawQuery = params.Encode() + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPageRangesDiffResponder handles the response to the GetPageRangesDiff request. 
+func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &PageList{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to read response body") + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// Resize resize the Blob +// +// blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must +// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information, +// see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only +// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobResizeResponse), err +} + +// resizePreparer prepares the Resize request. 
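// --- Editor's illustrative sketch; not part of the vendored azblob sources. ---
// Resize (documented above) requires blobContentLength to be aligned to a 512-byte
// boundary. A caller wanting "at least n bytes" might round up first; this helper is
// hypothetical and uses plain integer arithmetic only.

// alignToPage rounds n up to the next 512-byte boundary: alignToPage(1000) == 1024, alignToPage(1024) == 1024.
func alignToPage(n int64) int64 {
	return (n + 511) &^ 511
}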
+func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// resizeResponder handles the response to the Resize request. +func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobResizeResponse{rawResponse: resp.Response()}, err +} + +// UpdateSequenceNumber update the sequence number of the blob +// +// sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property +// applies to page blobs only. This property indicates how the service should modify the blob's sequence number timeout +// is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the container's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only +// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching +// value. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can +// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. 
+func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobSequenceNumber, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobUpdateSequenceNumberResponse), err +} + +// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request. +func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) + if blobSequenceNumber != nil { + req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request. +func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err +} + +// UploadPages the Upload Pages operation writes a range of pages to a page blob +// +// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an +// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. 
leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. +// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number +// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob +// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate +// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.uploadPagesPreparer(body, contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobUploadPagesResponse), err +} + +// uploadPagesPreparer prepares the UploadPages request. 
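// --- Editor's illustrative sketch; not part of the vendored azblob sources. ---
// UploadPages writes with "x-ms-page-write: update" (see uploadPagesPreparer below) and
// can be guarded by the x-ms-if-sequence-number-* headers; passing nil for those
// pointers simply omits the corresponding header. The helper below is hypothetical and
// only derives the two caller-supplied values for writing buf at offset; page ranges
// are assumed to be 512-byte aligned, as with Resize.

// pageWriteArgs returns the x-ms-range value and Content-Length for one page write.
func pageWriteArgs(offset int64, buf []byte) (rangeParameter string, contentLength int64) {
	contentLength = int64(len(buf))
	rangeParameter = fmt.Sprintf("bytes=%d-%d", offset, offset+contentLength-1)
	return rangeParameter, contentLength
}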
+func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "page") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifSequenceNumberLessThanOrEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) + } + if ifSequenceNumberLessThan != nil { + req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) + } + if ifSequenceNumberEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatches != nil { + req.Header.Set("If-Match", string(*ifMatches)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-page-write", "update") + return req, nil +} + +// uploadPagesResponder handles the response to the UploadPages request. +func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_responder_policy.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_responder_policy.go new file mode 100644 index 000000000000..2f391d731258 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_responder_policy.go @@ -0,0 +1,74 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io/ioutil" +) + +type responder func(resp pipeline.Response) (result pipeline.Response, err error) + +// ResponderPolicyFactory is a Factory capable of creating a responder pipeline. +type responderPolicyFactory struct { + responder responder +} + +// New creates a responder policy factory. 
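// --- Editor's illustrative sketch; not part of the vendored azblob sources. ---
// Every generated operation in this package follows the same shape: a *Preparer builds a
// pipeline.Request, client.Pipeline().Do(ctx, responderPolicyFactory{responder: ...}, req)
// sends it, and the responder validates, drains and (optionally) unmarshals the reply.
// A minimal responder in that style is sketched below; it would have to live inside
// this package, since validateResponse is unexported.

// exampleResponder accepts only 200 OK, drains the body so the connection can be
// reused, and hands the response back unchanged.
func exampleResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK) // a non-matching status becomes a ResponseError
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return resp, err
}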
+func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return responderPolicy{next: next, responder: arpf.responder} +} + +type responderPolicy struct { + next pipeline.Policy + responder responder +} + +// Do sends the request to the service and validates/deserializes the HTTP response. +func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + resp, err := arp.next.Do(ctx, request) + if err != nil { + return resp, err + } + return arp.responder(resp) +} + +// validateResponse checks an HTTP response's status code against a legal set of codes. +// If the response code is not legal, then validateResponse reads all of the response's body +// (containing error information) and returns a response error. +func validateResponse(resp pipeline.Response, successStatusCodes ...int) error { + if resp == nil { + return NewResponseError(nil, nil, "nil response") + } + responseCode := resp.Response().StatusCode + for _, i := range successStatusCodes { + if i == responseCode { + return nil + } + } + // only close the body in the failure case. in the + // success case responders will close the body as required. + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return NewResponseError(err, resp.Response(), "failed to read response body") + } + // the service code, description and details will be populated during unmarshalling + responseError := NewResponseError(nil, resp.Response(), resp.Response().Status) + if len(b) > 0 { + if err = xml.Unmarshal(b, &responseError); err != nil { + return NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return responseError +} + +// removes any BOM from the byte slice +func removeBOM(b []byte) []byte { + // UTF8 + return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_response_error.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_response_error.go new file mode 100644 index 000000000000..3dcc75bb52b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_response_error.go @@ -0,0 +1,95 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "fmt" + "github.com/Azure/azure-pipeline-go/pipeline" + "net" + "net/http" +) + +// if you want to provide custom error handling set this variable to your constructor function +var responseErrorFactory func(cause error, response *http.Response, description string) error + +// ResponseError identifies a responder-generated network or response parsing error. +type ResponseError interface { + // Error exposes the Error(), Temporary() and Timeout() methods. + net.Error // Includes the Go error interface + // Response returns the HTTP response. You may examine this but you should not modify it. + Response() *http.Response +} + +// NewResponseError creates an error object that implements the error interface. 
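// --- Editor's illustrative sketch; not part of the vendored azblob sources. ---
// Errors produced via validateResponse/NewResponseError satisfy the exported
// ResponseError interface declared above (net.Error plus Response()), so code outside
// the package can recover the HTTP status with a type assertion. A hedged caller-side
// pattern, assuming the usual net/http import:

// isNotFound reports whether err is a ResponseError carrying a 404 response.
func isNotFound(err error) bool {
	respErr, ok := err.(azblob.ResponseError)
	if !ok || respErr.Response() == nil {
		return false
	}
	return respErr.Response().StatusCode == http.StatusNotFound
}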
+func NewResponseError(cause error, response *http.Response, description string) error { + if responseErrorFactory != nil { + return responseErrorFactory(cause, response, description) + } + return &responseError{ + ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), + response: response, + description: description, + } +} + +// responseError is the internal struct that implements the public ResponseError interface. +type responseError struct { + pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause + response *http.Response + description string +} + +// Error implements the error interface's Error method to return a string representation of the error. +func (e *responseError) Error() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode) + fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description) + s := b.String() + return e.ErrorNode.Error(s) +} + +// Response implements the ResponseError interface's method to return the HTTP response. +func (e *responseError) Response() *http.Response { + return e.response +} + +// RFC7807 PROBLEM ------------------------------------------------------------------------------------ +// RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members. +/*type RFC7807Problem struct { + // Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation). + typeURI string // Should default to "about:blank" + // Optional: Short, human-readable summary (maybe localized). + title string + // Optional: HTTP status code generated by the origin server + status int + // Optional: Human-readable explanation for this problem occurance. + // Should help client correct the problem. Clients should NOT parse this string. + detail string + // Optional: A (relative) URI identifying this specific problem occurence (it may or may not be dereferenced). + instance string +} +// NewRFC7807Problem ... +func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error { + return &RFC7807Problem{ + typeURI: typeURI, + status: status, + title: fmt.Sprintf(titleFormat, a...), + } +} +// Error returns the error information as a string. +func (e *RFC7807Problem) Error() string { + return e.title +} +// TypeURI ... +func (e *RFC7807Problem) TypeURI() string { + if e.typeURI == "" { + e.typeURI = "about:blank" + } + return e.typeURI +} +// Members ... +func (e *RFC7807Problem) Members() (status int, title, detail, instance string) { + return e.status, e.title, e.detail, e.instance +}*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_service.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_service.go new file mode 100644 index 000000000000..76f235e1595b --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_service.go @@ -0,0 +1,350 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" +) + +// serviceClient is the client for the Service methods of the Azblob service. 
+type serviceClient struct { + managementClient +} + +// newServiceClient creates an instance of the serviceClient client. +func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient { + return serviceClient{newManagementClient(url, p)} +} + +// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) GetProperties(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceProperties, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*StorageServiceProperties), err +} + +// getPropertiesPreparer prepares the GetProperties request. +func (client serviceClient) getPropertiesPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. +func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &StorageServiceProperties{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to read response body") + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the +// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getStatisticsPreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*StorageServiceStats), err +} + +// getStatisticsPreparer prepares the GetStatistics request. +func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "stats") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getStatisticsResponder handles the response to the GetStatistics request. +func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &StorageServiceStats{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to read response body") + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ListContainersSegment the List Containers Segment operation returns a list of the containers under the specified +// account +// +// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a +// string value that identifies the portion of the list of containers to be returned with the next listing operation. +// The operation returns the NextMarker value within the response body if the listing operation did not return all +// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the +// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the +// client. maxresults is specifies the maximum number of containers to return. If the request does not specify +// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the +// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the +// remainder of the results. For this reason, it is possible that the service will return fewer results than specified +// by maxresults, or than the default of 5000. include is include this parameter to specify that the container's +// metadata be returned as part of the response body. 
timeout is the timeout parameter is expressed in seconds. For +// more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersResponse, error) { + if err := validate([]validation{ + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ListContainersResponse), err +} + +// listContainersSegmentPreparer prepares the ListContainersSegment request. +func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != ListContainersIncludeNone { + params.Set("include", string(include)) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listContainersSegmentResponder handles the response to the ListContainersSegment request. +func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListContainersResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to read response body") + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// SetProperties sets properties for a storage account's Blob service endpoint, including properties for Storage +// Analytics and CORS (Cross-Origin Resource Sharing) rules +// +// storageServiceProperties is the StorageService properties. 
timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (*ServiceSetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: storageServiceProperties, + constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.HourMetrics", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.MinuteMetrics", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setPropertiesPreparer(storageServiceProperties, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ServiceSetPropertiesResponse), err +} + +// setPropertiesPreparer prepares the SetProperties request. 
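// --- Editor's illustrative sketch; not part of the vendored azblob sources. ---
// ListContainersSegment (above) pages with the marker/NextMarker scheme its comment
// describes. The loop below sketches that; it would have to live in package azblob
// (serviceClient is unexported), and the NextMarker field used on the generated
// ListContainersResponse model is a hypothetical name, since that model is defined in
// a file not shown here.

// forEachContainerPage calls visit once per page until the service stops returning a
// continuation marker.
func forEachContainerPage(ctx context.Context, client serviceClient, visit func(*ListContainersResponse)) error {
	var marker *string
	for {
		resp, err := client.ListContainersSegment(ctx, nil, marker, nil, ListContainersIncludeNone, nil, nil)
		if err != nil {
			return err
		}
		visit(resp)
		next := resp.NextMarker // hypothetical accessor; only the existence of a NextMarker is documented above
		if next == nil || *next == "" {
			return nil
		}
		marker = next
	}
}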
+func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(storageServiceProperties) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setPropertiesResponder handles the response to the SetProperties request. +func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_validation.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_validation.go new file mode 100644 index 000000000000..98a2614e606d --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_validation.go @@ -0,0 +1,367 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "fmt" + "github.com/Azure/azure-pipeline-go/pipeline" + "reflect" + "regexp" + "strings" +) + +// Constraint stores constraint name, target field name +// Rule and chain validations. +type constraint struct { + // Target field name for validation. + target string + + // Constraint name e.g. minLength, MaxLength, Pattern, etc. + name string + + // Rule for constraint e.g. greater than 10, less than 5 etc. + rule interface{} + + // Chain validations for struct type + chain []constraint +} + +// Validation stores parameter-wise validation. +type validation struct { + targetValue interface{} + constraints []constraint +} + +// Constraint list +const ( + empty = "Empty" + null = "Null" + readOnly = "ReadOnly" + pattern = "Pattern" + maxLength = "MaxLength" + minLength = "MinLength" + maxItems = "MaxItems" + minItems = "MinItems" + multipleOf = "MultipleOf" + uniqueItems = "UniqueItems" + inclusiveMaximum = "InclusiveMaximum" + exclusiveMaximum = "ExclusiveMaximum" + exclusiveMinimum = "ExclusiveMinimum" + inclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. 
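// --- Editor's illustrative sketch; not part of the vendored azblob sources. ---
// The constraint/validation machinery above is what every generated operation runs
// before sending a request. The snippet below mirrors the timeout check used
// throughout the package; it must live in package azblob, as all of these identifiers
// are unexported.

// timeoutIsValid returns nil for a nil or non-negative timeout and an error otherwise,
// exactly like the inline validate(...) calls in the generated operations: a nil
// pointer passes (rule: false), while a non-nil pointer is checked against the chained
// inclusiveMinimum(0) constraint.
func timeoutIsValid(timeout *int32) error {
	return validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}})
}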
+func validate(m []validation) error { + for _, item := range m { + v := reflect.ValueOf(item.targetValue) + for _, constraint := range item.constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target)) + } + err := validate([]validation{ + { + targetValue: getInterfaceValue(f), + constraints: []constraint{v}, + }, + }) + return err +} + +func validatePtr(x reflect.Value, v constraint) error { + if v.name == readOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x.Elem()), + constraints: v.chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v constraint) error { + i := x.Int() + r, ok := v.rule.(int) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + switch v.name { + case multipleOf: + if i%int64(r) != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case exclusiveMinimum: + if i <= int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case exclusiveMaximum: + if i >= int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case inclusiveMinimum: + if i < int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case inclusiveMaximum: + if i > int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name)) + } + return nil +} + +func validateFloat(x reflect.Value, v constraint) error { + f := x.Float() + r, ok := v.rule.(float64) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule)) + } + switch v.name { + case exclusiveMinimum: + if f <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case exclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case inclusiveMinimum: + if f < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case inclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name)) + } + return nil +} + +func 
validateString(x reflect.Value, v constraint) error { + s := x.String() + switch v.name { + case empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case pattern: + reg, err := regexp.Compile(v.rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule)) + } + case maxLength: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + if len(s) > v.rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule)) + } + case minLength: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + if len(s) < v.rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule)) + } + case readOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name)) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x), + constraints: v.chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v constraint) error { + switch v.name { + case null: + if x.IsNil() { + return checkNil(x, v) + } + case empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case maxItems: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) + } + if x.Len() > v.rule.(int) { + return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len())) + } + case minItems: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) + } + if x.Len() < v.rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len())) + } + case uniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind())) + } + case readOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; must send as nil or empty in request") + } + case pattern: + reg, err := regexp.Compile(v.rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys := x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule)) + } + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name)) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x), + constraints: v.chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v constraint) error { + if _, ok := v.rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must 
be bool value for %v constraint; got: %v", v.name, v.rule)) + } + if v.rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v constraint) error { + if _, ok := v.rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) + } + if v.rule.(bool) { + return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v constraint, message string) error { + return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s", + v.target, v.name, getInterfaceValue(x), message)) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_version.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_version.go new file mode 100644 index 000000000000..760271a42159 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_version.go @@ -0,0 +1,14 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/0.0.0 azblob/2018-03-28" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return "0.0.0" +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_response_helpers.go b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_response_helpers.go new file mode 100644 index 000000000000..b4f058b6748c --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_response_helpers.go @@ -0,0 +1,242 @@ +package azblob + +import ( + "context" + "io" + "net/http" + "time" +) + +// BlobHTTPHeaders contains read/writeable blob properties. +type BlobHTTPHeaders struct { + ContentType string + ContentMD5 []byte + ContentEncoding string + ContentLanguage string + ContentDisposition string + CacheControl string +} + +// NewHTTPHeaders returns the user-modifiable properties for this blob. 
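// --- Editor's illustrative sketch; not part of the vendored azblob sources. ---
// BlobHTTPHeaders above bundles the standard, user-settable blob properties. Building
// one is just a struct literal; the values shown are arbitrary examples, and wiring the
// struct into uploads or property-setting calls happens through exported wrappers that
// are not part of this file.

// exampleHeaders returns a BlobHTTPHeaders for a generic binary blob.
func exampleHeaders() BlobHTTPHeaders {
	return BlobHTTPHeaders{
		ContentType:  "application/octet-stream",
		CacheControl: "max-age=3600",
	}
}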
+func (bgpr BlobGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders { + return BlobHTTPHeaders{ + ContentType: bgpr.ContentType(), + ContentEncoding: bgpr.ContentEncoding(), + ContentLanguage: bgpr.ContentLanguage(), + ContentDisposition: bgpr.ContentDisposition(), + CacheControl: bgpr.CacheControl(), + ContentMD5: bgpr.ContentMD5(), + } +} + +/////////////////////////////////////////////////////////////////////////////// + +// NewHTTPHeaders returns the user-modifiable properties for this blob. +func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders { + return BlobHTTPHeaders{ + ContentType: dr.ContentType(), + ContentEncoding: dr.ContentEncoding(), + ContentLanguage: dr.ContentLanguage(), + ContentDisposition: dr.ContentDisposition(), + CacheControl: dr.CacheControl(), + ContentMD5: dr.ContentMD5(), + } +} + +/////////////////////////////////////////////////////////////////////////////// + +// DownloadResponse wraps AutoRest generated downloadResponse and helps to provide info for retry. +type DownloadResponse struct { + r *downloadResponse + ctx context.Context + b BlobURL + getInfo HTTPGetterInfo +} + +// Body constructs new RetryReader stream for reading data. If a connection failes +// while reading, it will make additional requests to reestablish a connection and +// continue reading. Specifying a RetryReaderOption's with MaxRetryRequests set to 0 +// (the default), returns the original response body and no retries will be performed. +func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser { + if o.MaxRetryRequests == 0 { // No additional retries + return r.Response().Body + } + return NewRetryReader(r.ctx, r.Response(), r.getInfo, o, + func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) { + resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count, + BlobAccessConditions{ + HTTPAccessConditions: HTTPAccessConditions{IfMatch: getInfo.ETag}, + }, + false) + if err != nil { + return nil, err + } + return resp.Response(), err + }, + ) +} + +// Response returns the raw HTTP response object. +func (r DownloadResponse) Response() *http.Response { + return r.r.Response() +} + +// NewHTTPHeaders returns the user-modifiable properties for this blob. +func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders { + return r.r.NewHTTPHeaders() +} + +// BlobContentMD5 returns the value for header x-ms-blob-content-md5. +func (r DownloadResponse) BlobContentMD5() []byte { + return r.r.BlobContentMD5() +} + +// ContentMD5 returns the value for header Content-MD5. +func (r DownloadResponse) ContentMD5() []byte { + return r.r.ContentMD5() +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (r DownloadResponse) StatusCode() int { + return r.r.StatusCode() +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (r DownloadResponse) Status() string { + return r.r.Status() +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (r DownloadResponse) AcceptRanges() string { + return r.r.AcceptRanges() +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (r DownloadResponse) BlobCommittedBlockCount() int32 { + return r.r.BlobCommittedBlockCount() +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (r DownloadResponse) BlobSequenceNumber() int64 { + return r.r.BlobSequenceNumber() +} + +// BlobType returns the value for header x-ms-blob-type. 
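// --- Editor's illustrative sketch; not part of the vendored azblob sources. ---
// DownloadResponse.Body (above) swaps the raw body for a retrying reader when
// MaxRetryRequests > 0, so a dropped connection is re-established mid-read. The sketch
// below assumes blobURL is an already-constructed azblob.BlobURL and that an
// offset/count of 0/0 means "read the whole blob", which matches how the retry getter
// above reuses Download.

// readWholeBlob streams a blob into w, retrying broken reads up to three times.
func readWholeBlob(ctx context.Context, blobURL azblob.BlobURL, w io.Writer) error {
	resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false)
	if err != nil {
		return err
	}
	body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
	defer body.Close()
	_, err = io.Copy(w, body)
	return err
}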
+func (r DownloadResponse) BlobType() BlobType { + return r.r.BlobType() +} + +// CacheControl returns the value for header Cache-Control. +func (r DownloadResponse) CacheControl() string { + return r.r.CacheControl() +} + +// ContentDisposition returns the value for header Content-Disposition. +func (r DownloadResponse) ContentDisposition() string { + return r.r.ContentDisposition() +} + +// ContentEncoding returns the value for header Content-Encoding. +func (r DownloadResponse) ContentEncoding() string { + return r.r.ContentEncoding() +} + +// ContentLanguage returns the value for header Content-Language. +func (r DownloadResponse) ContentLanguage() string { + return r.r.ContentLanguage() +} + +// ContentLength returns the value for header Content-Length. +func (r DownloadResponse) ContentLength() int64 { + return r.r.ContentLength() +} + +// ContentRange returns the value for header Content-Range. +func (r DownloadResponse) ContentRange() string { + return r.r.ContentRange() +} + +// ContentType returns the value for header Content-Type. +func (r DownloadResponse) ContentType() string { + return r.r.ContentType() +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (r DownloadResponse) CopyCompletionTime() time.Time { + return r.r.CopyCompletionTime() +} + +// CopyID returns the value for header x-ms-copy-id. +func (r DownloadResponse) CopyID() string { + return r.r.CopyID() +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (r DownloadResponse) CopyProgress() string { + return r.r.CopyProgress() +} + +// CopySource returns the value for header x-ms-copy-source. +func (r DownloadResponse) CopySource() string { + return r.r.CopySource() +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (r DownloadResponse) CopyStatus() CopyStatusType { + return r.r.CopyStatus() +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (r DownloadResponse) CopyStatusDescription() string { + return r.r.CopyStatusDescription() +} + +// Date returns the value for header Date. +func (r DownloadResponse) Date() time.Time { + return r.r.Date() +} + +// ETag returns the value for header ETag. +func (r DownloadResponse) ETag() ETag { + return r.r.ETag() +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (r DownloadResponse) IsServerEncrypted() string { + return r.r.IsServerEncrypted() +} + +// LastModified returns the value for header Last-Modified. +func (r DownloadResponse) LastModified() time.Time { + return r.r.LastModified() +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (r DownloadResponse) LeaseDuration() LeaseDurationType { + return r.r.LeaseDuration() +} + +// LeaseState returns the value for header x-ms-lease-state. +func (r DownloadResponse) LeaseState() LeaseStateType { + return r.r.LeaseState() +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (r DownloadResponse) LeaseStatus() LeaseStatusType { + return r.r.LeaseStatus() +} + +// RequestID returns the value for header x-ms-request-id. +func (r DownloadResponse) RequestID() string { + return r.r.RequestID() +} + +// Version returns the value for header x-ms-version. +func (r DownloadResponse) Version() string { + return r.r.Version() +} + +// NewMetadata returns user-defined key/value pairs. 
+func (r DownloadResponse) NewMetadata() Metadata { + return r.r.NewMetadata() +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/LICENSE b/vendor/github.com/Azure/azure-storage-blob-go/LICENSE new file mode 100644 index 000000000000..d1ca00f20a89 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-storage-go/README.md b/vendor/github.com/Azure/azure-storage-go/README.md deleted file mode 100644 index f4af7bf399c4..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Azure Storage SDK for Go -[![GoDoc](https://godoc.org/github.com/Azure/azure-storage-go?status.svg)](https://godoc.org/github.com/Azure/azure-storage-go) [![Build Status](https://travis-ci.org/Azure/azure-storage-go.svg?branch=master)](https://travis-ci.org/Azure/azure-storage-go) [![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-storage-go)](https://goreportcard.com/report/github.com/Azure/azure-storage-go) - -The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package. - -This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) - -# Contributing - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
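The `DownloadResponse` wrapper added in `zz_response_helpers.go` above exists mainly so a caller can turn a one-shot blob GET into a stream that re-issues ranged GETs after transient failures. The following sketch is illustrative only and is not part of this diff: the `blobURL` value, the `MaxRetryRequests` count, and the assumption that an offset/count of `0, 0` means "read to the end" are all hypothetical, and only calls that appear in the vendored code above (`Download`, `Body`, `RetryReaderOptions`) are used.

```go
package example

import (
	"context"
	"io/ioutil"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

// readWholeBlob is a rough usage sketch of the retrying download wrapper.
// blobURL is assumed to be an already-constructed azblob.BlobURL.
func readWholeBlob(ctx context.Context, blobURL azblob.BlobURL) ([]byte, error) {
	// Issue the initial GET; offset 0 and count 0 are assumed here to mean
	// "the whole blob". Download returns the *DownloadResponse wrapper above.
	resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false)
	if err != nil {
		return nil, err
	}

	// Wrap the body so that up to 3 additional ranged GETs are issued if the
	// connection drops mid-read, resuming from the last successful offset.
	body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
	defer body.Close()

	return ioutil.ReadAll(body)
}
```

As the comment on `Body` notes, leaving `MaxRetryRequests` at its zero value skips the retry wrapper entirely and simply returns the raw HTTP response body.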
diff --git a/vendor/github.com/Azure/azure-storage-go/authorization.go b/vendor/github.com/Azure/azure-storage-go/authorization.go deleted file mode 100644 index 89a0d0b3cdf8..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/authorization.go +++ /dev/null @@ -1,223 +0,0 @@ -// Package storage provides clients for Microsoft Azure Storage Services. -package storage - -import ( - "bytes" - "fmt" - "net/url" - "sort" - "strings" -) - -// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services - -type authentication string - -const ( - sharedKey authentication = "sharedKey" - sharedKeyForTable authentication = "sharedKeyTable" - sharedKeyLite authentication = "sharedKeyLite" - sharedKeyLiteForTable authentication = "sharedKeyLiteTable" - - // headers - headerAuthorization = "Authorization" - headerContentLength = "Content-Length" - headerDate = "Date" - headerXmsDate = "x-ms-date" - headerXmsVersion = "x-ms-version" - headerContentEncoding = "Content-Encoding" - headerContentLanguage = "Content-Language" - headerContentType = "Content-Type" - headerContentMD5 = "Content-MD5" - headerIfModifiedSince = "If-Modified-Since" - headerIfMatch = "If-Match" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" -) - -func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { - authHeader, err := c.getSharedKey(verb, url, headers, auth) - if err != nil { - return nil, err - } - headers[headerAuthorization] = authHeader - return headers, nil -} - -func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { - canRes, err := c.buildCanonicalizedResource(url, auth) - if err != nil { - return "", err - } - - canString, err := buildCanonicalizedString(verb, headers, canRes, auth) - if err != nil { - return "", err - } - return c.createAuthorizationHeader(canString, auth), nil -} - -func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) { - errMsg := "buildCanonicalizedResource error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := bytes.NewBufferString("/") - cr.WriteString(c.getCanonicalizedAccountName()) - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. 
- // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr.WriteString(u.EscapedPath()) - } - - params, err := url.ParseQuery(u.RawQuery) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 - if auth == sharedKey { - if len(params) > 0 { - cr.WriteString("\n") - - keys := []string{} - for key := range params { - keys = append(keys, key) - } - sort.Strings(keys) - - completeParams := []string{} - for _, key := range keys { - if len(params[key]) > 1 { - sort.Strings(params[key]) - } - - completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) - } - cr.WriteString(strings.Join(completeParams, "\n")) - } - } else { - // search for "comp" parameter, if exists then add it to canonicalizedresource - if v, ok := params["comp"]; ok { - cr.WriteString("?comp=" + v[0]) - } - } - - return string(cr.Bytes()), nil -} - -func (c *Client) getCanonicalizedAccountName() string { - // since we may be trying to access a secondary storage account, we need to - // remove the -secondary part of the storage name - return strings.TrimSuffix(c.accountName, "-secondary") -} - -func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) { - contentLength := headers[headerContentLength] - if contentLength == "0" { - contentLength = "" - } - date := headers[headerDate] - if v, ok := headers[headerXmsDate]; ok { - if auth == sharedKey || auth == sharedKeyLite { - date = "" - } else { - date = v - } - } - var canString string - switch auth { - case sharedKey: - canString = strings.Join([]string{ - verb, - headers[headerContentEncoding], - headers[headerContentLanguage], - contentLength, - headers[headerContentMD5], - headers[headerContentType], - date, - headers[headerIfModifiedSince], - headers[headerIfMatch], - headers[headerIfNoneMatch], - headers[headerIfUnmodifiedSince], - headers[headerRange], - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case sharedKeyForTable: - canString = strings.Join([]string{ - verb, - headers[headerContentMD5], - headers[headerContentType], - date, - canonicalizedResource, - }, "\n") - case sharedKeyLite: - canString = strings.Join([]string{ - verb, - headers[headerContentMD5], - headers[headerContentType], - date, - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case sharedKeyLiteForTable: - canString = strings.Join([]string{ - date, - canonicalizedResource, - }, "\n") - default: - return "", fmt.Errorf("%s authentication is not supported yet", auth) - } - return canString, nil -} - -func buildCanonicalizedHeader(headers map[string]string) string { - cm := make(map[string]string) - - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - if strings.HasPrefix(headerName, "x-ms-") { - cm[headerName] = v - } - } - - if len(cm) == 0 { - return "" - } - - keys := []string{} - for key := range cm { - keys = append(keys, key) - } - - sort.Strings(keys) - - ch := bytes.NewBufferString("") - - for _, key := range keys { - ch.WriteString(key) - ch.WriteRune(':') - ch.WriteString(cm[key]) - ch.WriteRune('\n') - } - - return strings.TrimSuffix(string(ch.Bytes()), "\n") -} - -func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string { - signature := c.computeHmac256(canonicalizedString) - var key string - switch 
auth { - case sharedKey, sharedKeyForTable: - key = "SharedKey" - case sharedKeyLite, sharedKeyLiteForTable: - key = "SharedKeyLite" - } - return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature) -} diff --git a/vendor/github.com/Azure/azure-storage-go/blob.go b/vendor/github.com/Azure/azure-storage-go/blob.go deleted file mode 100644 index 636efc662d69..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/blob.go +++ /dev/null @@ -1,1130 +0,0 @@ -package storage - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// A Blob is an entry in BlobListResponse. -type Blob struct { - Name string `xml:"Name"` - Properties BlobProperties `xml:"Properties"` - Metadata BlobMetadata `xml:"Metadata"` -} - -// BlobMetadata is a set of custom name/value pairs. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx -type BlobMetadata map[string]string - -type blobMetadataEntries struct { - Entries []blobMetadataEntry `xml:",any"` -} -type blobMetadataEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` -} - -// UnmarshalXML converts the xml:Metadata into Metadata map -func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var entries blobMetadataEntries - if err := d.DecodeElement(&entries, &start); err != nil { - return err - } - for _, entry := range entries.Entries { - if *bm == nil { - *bm = make(BlobMetadata) - } - (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value - } - return nil -} - -// MarshalXML implements the xml.Marshaler interface. It encodes -// metadata name/value pairs as they would appear in an Azure -// ListBlobs response. -func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - entries := make([]blobMetadataEntry, 0, len(bm)) - for k, v := range bm { - entries = append(entries, blobMetadataEntry{ - XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)}, - Value: v, - }) - } - return enc.EncodeElement(blobMetadataEntries{ - Entries: entries, - }, start) -} - -// BlobProperties contains various properties of a blob -// returned in various endpoints like ListBlobs or GetBlobProperties. -type BlobProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - ContentMD5 string `xml:"Content-MD5"` - ContentLength int64 `xml:"Content-Length"` - ContentType string `xml:"Content-Type"` - ContentEncoding string `xml:"Content-Encoding"` - CacheControl string `xml:"Cache-Control"` - ContentLanguage string `xml:"Cache-Language"` - BlobType BlobType `xml:"x-ms-blob-blob-type"` - SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` - CopyID string `xml:"CopyId"` - CopyStatus string `xml:"CopyStatus"` - CopySource string `xml:"CopySource"` - CopyProgress string `xml:"CopyProgress"` - CopyCompletionTime string `xml:"CopyCompletionTime"` - CopyStatusDescription string `xml:"CopyStatusDescription"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` -} - -// BlobHeaders contains various properties of a blob and is an entry -// in SetBlobProperties -type BlobHeaders struct { - ContentMD5 string `header:"x-ms-blob-content-md5"` - ContentLanguage string `header:"x-ms-blob-content-language"` - ContentEncoding string `header:"x-ms-blob-content-encoding"` - ContentType string `header:"x-ms-blob-content-type"` - CacheControl string `header:"x-ms-blob-cache-control"` -} - -// BlobType defines the type of the Azure Blob. 
-type BlobType string - -// Types of page blobs -const ( - BlobTypeBlock BlobType = "BlockBlob" - BlobTypePage BlobType = "PageBlob" - BlobTypeAppend BlobType = "AppendBlob" -) - -// PageWriteType defines the type updates that are going to be -// done on the page blob. -type PageWriteType string - -// Types of operations on page blobs -const ( - PageWriteTypeUpdate PageWriteType = "update" - PageWriteTypeClear PageWriteType = "clear" -) - -const ( - blobCopyStatusPending = "pending" - blobCopyStatusSuccess = "success" - blobCopyStatusAborted = "aborted" - blobCopyStatusFailed = "failed" -) - -// lease constants. -const ( - leaseHeaderPrefix = "x-ms-lease-" - headerLeaseID = "x-ms-lease-id" - leaseAction = "x-ms-lease-action" - leaseBreakPeriod = "x-ms-lease-break-period" - leaseDuration = "x-ms-lease-duration" - leaseProposedID = "x-ms-proposed-lease-id" - leaseTime = "x-ms-lease-time" - - acquireLease = "acquire" - renewLease = "renew" - changeLease = "change" - releaseLease = "release" - breakLease = "break" -) - -// BlockListType is used to filter out types of blocks in a Get Blocks List call -// for a block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all -// block types. -type BlockListType string - -// Filters for listing blocks in block blobs -const ( - BlockListTypeAll BlockListType = "all" - BlockListTypeCommitted BlockListType = "committed" - BlockListTypeUncommitted BlockListType = "uncommitted" -) - -// Maximum sizes (per REST API) for various concepts -const ( - MaxBlobBlockSize = 100 * 1024 * 1024 - MaxBlobPageSize = 4 * 1024 * 1024 -) - -// BlockStatus defines states a block for a block blob can -// be in. -type BlockStatus string - -// List of statuses that can be used to refer to a block in a block list -const ( - BlockStatusUncommitted BlockStatus = "Uncommitted" - BlockStatusCommitted BlockStatus = "Committed" - BlockStatusLatest BlockStatus = "Latest" -) - -// Block is used to create Block entities for Put Block List -// call. -type Block struct { - ID string - Status BlockStatus -} - -// BlockListResponse contains the response fields from Get Block List call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -type BlockListResponse struct { - XMLName xml.Name `xml:"BlockList"` - CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` - UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` -} - -// BlockResponse contains the block information returned -// in the GetBlockListCall. -type BlockResponse struct { - Name string `xml:"Name"` - Size int64 `xml:"Size"` -} - -// GetPageRangesResponse contains the response fields from -// Get Page Ranges call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type GetPageRangesResponse struct { - XMLName xml.Name `xml:"PageList"` - PageList []PageRange `xml:"PageRange"` -} - -// PageRange contains information about a page of a page blob from -// Get Pages Range call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type PageRange struct { - Start int64 `xml:"Start"` - End int64 `xml:"End"` -} - -var ( - errBlobCopyAborted = errors.New("storage: blob copy is aborted") - errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") -) - -// BlobExists returns true if a blob with given name exists on the specified -// container of the storage account. 
-func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - headers := b.client.getStandardHeaders() - resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// GetBlobURL gets the canonical URL to the blob with the specified name in the -// specified container. If name is not specified, the canonical URL for the entire -// container is obtained. -// This method does not create a publicly accessible URL if the blob or container -// is private and this method does not check if the blob exists. -func (b BlobStorageClient) GetBlobURL(container, name string) string { - if container == "" { - container = "$root" - } - return b.client.getEndpoint(blobServiceName, pathForResource(container, name), url.Values{}) -} - -// GetBlob returns a stream to read the blob. Caller must call Close() the -// reader to close on the underlying connection. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx -func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) { - resp, err := b.getBlobRange(container, name, "", nil) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - return resp.body, nil -} - -// GetBlobRange reads the specified range of a blob to a stream. The bytesRange -// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx -func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) { - resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { - return nil, err - } - return resp.body, nil -} - -func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - if bytesRange != "" { - headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange) - } - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - return resp, err -} - -// leasePut is common PUT code for the various acquire/release/break etc functions. 
-func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) { - params := url.Values{"comp": {"lease"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { - return nil, err - } - - return resp.headers, nil -} - -// SnapshotBlob creates a snapshot for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx -func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout int, extraHeaders map[string]string) (snapshotTimestamp *time.Time, err error) { - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - params := url.Values{"comp": {"snapshot"}} - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - for k, v := range extraHeaders { - headers[k] = v - } - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil || resp == nil { - return nil, err - } - - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { - return nil, err - } - - snapshotResponse := resp.headers.Get(http.CanonicalHeaderKey("x-ms-snapshot")) - if snapshotResponse != "" { - snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse) - if err != nil { - return nil, err - } - - return &snapshotTimestamp, nil - } - - return nil, errors.New("Snapshot not created") -} - -// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// returns leaseID acquired -// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum -// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1. -func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = acquireLease - - if leaseTimeInSeconds == -1 { - // Do nothing, but don't trigger the following clauses. 
- } else if leaseTimeInSeconds > 60 || b.client.apiVersion < "2012-02-12" { - leaseTimeInSeconds = 60 - } else if leaseTimeInSeconds < 15 { - leaseTimeInSeconds = 15 - } - - headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) - - if proposedLeaseID != "" { - headers[leaseProposedID] = proposedLeaseID - } - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated) - if err != nil { - return "", err - } - - returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) - - if returnedLeaseID != "" { - return returnedLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// BreakLease breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// Returns the timeout remaining in the lease in seconds -func (b BlobStorageClient) BreakLease(container string, name string) (breakTimeout int, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = breakLease - return b.breakLeaseCommon(container, name, headers) -} - -// BreakLeaseWithBreakPeriod breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// breakPeriodInSeconds is used to determine how long until new lease can be created. -// Returns the timeout remaining in the lease in seconds -func (b BlobStorageClient) BreakLeaseWithBreakPeriod(container string, name string, breakPeriodInSeconds int) (breakTimeout int, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = breakLease - headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) - return b.breakLeaseCommon(container, name, headers) -} - -// breakLeaseCommon is common code for both version of BreakLease (with and without break period) -func (b BlobStorageClient) breakLeaseCommon(container string, name string, headers map[string]string) (breakTimeout int, err error) { - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusAccepted) - if err != nil { - return 0, err - } - - breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) - if breakTimeoutStr != "" { - breakTimeout, err = strconv.Atoi(breakTimeoutStr) - if err != nil { - return 0, err - } - } - - return breakTimeout, nil -} - -// ChangeLease changes a lease ID for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// Returns the new LeaseID acquired -func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = changeLease - headers[headerLeaseID] = currentLeaseID - headers[leaseProposedID] = proposedLeaseID - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { - return "", err - } - - newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) - if newLeaseID != "" { - return newLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// ReleaseLease releases the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error { - headers := b.client.getStandardHeaders() - headers[leaseAction] = releaseLease - headers[headerLeaseID] = currentLeaseID - - _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { - return err - } - - return nil -} - -// RenewLease renews the lease for a blob as per 
https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error { - headers := b.client.getStandardHeaders() - headers[leaseAction] = renewLease - headers[headerLeaseID] = currentLeaseID - - _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { - return err - } - - return nil -} - -// GetBlobProperties provides various information about the specified -// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx -func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - var contentLength int64 - contentLengthStr := resp.headers.Get("Content-Length") - if contentLengthStr != "" { - contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) - if err != nil { - return nil, err - } - } - - var sequenceNum int64 - sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number") - if sequenceNumStr != "" { - sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) - if err != nil { - return nil, err - } - } - - return &BlobProperties{ - LastModified: resp.headers.Get("Last-Modified"), - Etag: resp.headers.Get("Etag"), - ContentMD5: resp.headers.Get("Content-MD5"), - ContentLength: contentLength, - ContentEncoding: resp.headers.Get("Content-Encoding"), - ContentType: resp.headers.Get("Content-Type"), - CacheControl: resp.headers.Get("Cache-Control"), - ContentLanguage: resp.headers.Get("Content-Language"), - SequenceNumber: sequenceNum, - CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), - CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), - CopyID: resp.headers.Get("x-ms-copy-id"), - CopyProgress: resp.headers.Get("x-ms-copy-progress"), - CopySource: resp.headers.Get("x-ms-copy-source"), - CopyStatus: resp.headers.Get("x-ms-copy-status"), - BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), - LeaseStatus: resp.headers.Get("x-ms-lease-status"), - LeaseState: resp.headers.Get("x-ms-lease-state"), - }, nil -} - -// SetBlobProperties replaces the BlobHeaders for the specified blob. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetBlobProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691966.aspx -func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders BlobHeaders) error { - params := url.Values{"comp": {"properties"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - extraHeaders := headersFromStruct(blobHeaders) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// SetBlobMetadata replaces the metadata for the specified blob. 
-// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetBlobMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error { - params := url.Values{"comp": {"metadata"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - metadata = b.client.protectUserAgent(metadata) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - for k, v := range metadata { - headers[userDefinedMetadataHeaderPrefix+k] = v - } - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// GetBlobMetadata returns all user-defined metadata for the specified blob. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) { - params := url.Values{"comp": {"metadata"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - metadata := make(map[string]string) - for k, v := range resp.headers { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["foo"] = content of the last X-Ms-Meta-Foo header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - return metadata, nil -} - -// CreateBlockBlob initializes an empty block blob with no blocks. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlob(container, name string) error { - return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) -} - -// CreateBlockBlobFromReader initializes a block blob using data from -// reader. Size must be the number of bytes read from reader. To -// create an empty blob, use size==0 and reader==nil. -// -// The API rejects requests with size > 256 MiB (but this limit is not -// checked by the SDK). To write a larger blob, use CreateBlockBlob, -// PutBlock, and PutBlockList. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%d", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlock saves the given data chunk to the specified block blob with -// given ID. -// -// The API rejects chunks larger than 100 MB (but this limit is not -// checked by the SDK). -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { - return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) -} - -// PutBlockWithLength saves the given data stream of exactly specified size to -// the block blob with given ID. It is an alternative to PutBlocks where data -// comes as stream but the length is known in advance. -// -// The API rejects requests with size > 100 MB (but this limit is not -// checked by the SDK). -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%v", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth) - if err != nil { - return err - } - - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlockList saves list of blocks to the specified block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx -func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { - blockListXML := prepareBlockListRequest(blocks) - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) - headers := b.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) - - resp, err := b.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetBlockList retrieves list of blocks in the specified block blob. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { - params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - var out BlockListResponse - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutPageBlob initializes an empty page blob with specified name and maximum -// size in bytes (size must be aligned to a 512-byte boundary). A page blob must -// be created using this method before writing pages. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutPage writes a range of pages to a page blob or clears the given range. -// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned -// with 512-byte boundaries and chunk must be of size multiplies by 512. -// -// See https://msdn.microsoft.com/en-us/library/ee691975.aspx -func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-page-write"] = string(writeType) - headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) - for k, v := range extraHeaders { - headers[k] = v - } - var contentLength int64 - var data io.Reader - if writeType == PageWriteTypeClear { - contentLength = 0 - data = bytes.NewReader([]byte{}) - } else { - contentLength = int64(len(chunk)) - data = bytes.NewReader(chunk) - } - headers["Content-Length"] = fmt.Sprintf("%v", contentLength) - - resp, err := b.client.exec(http.MethodPut, uri, headers, data, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetPageRanges returns the list of valid page ranges for a page blob. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}}) - headers := b.client.getStandardHeaders() - - var out GetPageRangesResponse - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return out, err - } - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutAppendBlob initializes an empty append blob with specified name. An -// append blob must be created using this method before appending blocks. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// AppendBlock appends a block to an append blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx -func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CopyBlob starts a blob copy operation and waits for the operation to -// complete. sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore -// this helper method works faster on smaller files. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx -func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { - copyID, err := b.StartBlobCopy(container, name, sourceBlob) - if err != nil { - return err - } - - return b.WaitForBlobCopy(container, name, copyID) -} - -// StartBlobCopy starts a blob copy operation. -// sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx -func (b BlobStorageClient) StartBlobCopy(container, name, sourceBlob string) (string, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - headers["x-ms-copy-source"] = sourceBlob - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return "", err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { - return "", err - } - - copyID := resp.headers.Get("x-ms-copy-id") - if copyID == "" { - return "", errors.New("Got empty copy id header") - } - return copyID, nil -} - -// AbortBlobCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. -// copyID is generated from StartBlobCopy function. -// currentLeaseID is required IF the destination blob has an active lease on it. -// As defined in https://msdn.microsoft.com/en-us/library/azure/jj159098.aspx -func (b BlobStorageClient) AbortBlobCopy(container, name, copyID, currentLeaseID string, timeout int) error { - params := url.Values{"comp": {"copy"}, "copyid": {copyID}} - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - headers["x-ms-copy-action"] = "abort" - - if currentLeaseID != "" { - headers[headerLeaseID] = currentLeaseID - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - - return nil -} - -// WaitForBlobCopy loops until a BlobCopy operation is completed (or fails with error) -func (b BlobStorageClient) WaitForBlobCopy(container, name, copyID string) error { - for { - props, err := b.GetBlobProperties(container, name) - if err != nil { - return err - } - - if props.CopyID != copyID { - return errBlobCopyIDMismatch - } - - switch props.CopyStatus { - case blobCopyStatusSuccess: - return nil - case blobCopyStatusPending: - continue - case blobCopyStatusAborted: - return errBlobCopyAborted - case blobCopyStatusFailed: - return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription) - default: - return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus) - } - } -} - -// DeleteBlob deletes the given blob from the specified container. -// If the blob does not exists at the time of the Delete Blob operation, it -// returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[string]string) error { - resp, err := b.deleteBlob(container, name, extraHeaders) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteBlobIfExists deletes the given blob from the specified container If the -// blob is deleted with this call, returns true. Otherwise returns false. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) { - resp, err := b.deleteBlob(container, name, extraHeaders) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - for k, v := range extraHeaders { - headers[k] = v - } - - return b.client.exec(http.MethodDelete, uri, headers, nil, b.auth) -} - -// helper method to construct the path to a blob given its container and blob -// name -func pathForBlob(container, name string) string { - return fmt.Sprintf("/%s/%s", container, name) -} - -// helper method to construct the path to either a blob or container -func pathForResource(container, name string) string { - if len(name) > 0 { - return fmt.Sprintf("/%s/%s", container, name) - } - return fmt.Sprintf("/%s", container) -} - -// GetBlobSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols. -// If old API version is used but no signedIP is passed (ie empty string) then this should still work. -// We only populate the signedIP when it non-empty. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b BlobStorageClient) GetBlobSASURIWithSignedIPAndProtocol(container, name string, expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) { - var ( - signedPermissions = permissions - blobURL = b.GetBlobURL(container, name) - ) - canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL, b.auth) - if err != nil { - return "", err - } - - // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. - // It must include the service name (blob, table, queue or file) for version 2015-02-21 or - // later, the storage account name, and the resource name, and must be URL-decoded. - // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
- canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - signedExpiry := expiry.UTC().Format(time.RFC3339) - - //If blob name is missing, resource is a container - signedResource := "c" - if len(name) > 0 { - signedResource = "b" - } - - protocols := "https,http" - if HTTPSOnly { - protocols = "https" - } - stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols) - if err != nil { - return "", err - } - - sig := b.client.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {b.client.apiVersion}, - "se": {signedExpiry}, - "sr": {signedResource}, - "sp": {signedPermissions}, - "sig": {sig}, - } - - if b.client.apiVersion >= "2015-04-05" { - sasParams.Add("spr", protocols) - if signedIPRange != "" { - sasParams.Add("sip", signedIPRange) - } - } - - sasURL, err := url.Parse(blobURL) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -// GetBlobSASURI creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { - url, err := b.GetBlobSASURIWithSignedIPAndProtocol(container, name, expiry, permissions, "", false) - return url, err -} - -func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) { - var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string - - if signedVersion >= "2015-02-21" { - canonicalizedResource = "/blob" + canonicalizedResource - } - - // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 - if signedVersion >= "2015-04-05" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") -} diff --git a/vendor/github.com/Azure/azure-storage-go/blobserviceclient.go b/vendor/github.com/Azure/azure-storage-go/blobserviceclient.go deleted file mode 100644 index e5911ac81bf9..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/blobserviceclient.go +++ /dev/null @@ -1,92 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "net/url" -) - -// BlobStorageClient contains operations for Microsoft Azure Blob Storage -// Service. -type BlobStorageClient struct { - client Client - auth authentication -} - -// GetServiceProperties gets the properties of your storage account's blob service. 
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties -func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) { - return b.client.getServiceProperties(blobServiceName, b.auth) -} - -// SetServiceProperties sets the properties of your storage account's blob service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties -func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error { - return b.client.setServiceProperties(props, blobServiceName, b.auth) -} - -// ListContainersParameters defines the set of customizable parameters to make a -// List Containers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ListContainersParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -// GetContainerReference returns a Container object for the specified container name. -func (b BlobStorageClient) GetContainerReference(name string) Container { - return Container{ - bsc: &b, - Name: name, - } -} - -// ListContainers returns the list of containers in a storage account along with -// pagination token and other response details. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, "", q) - headers := b.client.getStandardHeaders() - - var out ContainerListResponse - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) - - // assign our client to the newly created Container objects - for i := range out.Containers { - out.Containers[i].bsc = &b - } - return &out, err -} - -func (p ListContainersParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} diff --git a/vendor/github.com/Azure/azure-storage-go/client.go b/vendor/github.com/Azure/azure-storage-go/client.go deleted file mode 100644 index 9ddbf08aee56..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/client.go +++ /dev/null @@ -1,479 +0,0 @@ -// Package storage provides clients for Microsoft Azure Storage Services. -package storage - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "runtime" - "strconv" - "strings" - - "github.com/Azure/go-autorest/autorest/azure" -) - -const ( - // DefaultBaseURL is the domain name used for storage requests in the - // public cloud when a default client is created. - DefaultBaseURL = "core.windows.net" - - // DefaultAPIVersion is the Azure Storage API version string used when a - // basic client is created. 
- DefaultAPIVersion = "2016-05-31" - - defaultUseHTTPS = true - - // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator - StorageEmulatorAccountName = "devstoreaccount1" - - // StorageEmulatorAccountKey is the the fixed storage account used by Azure Storage Emulator - StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" - - blobServiceName = "blob" - tableServiceName = "table" - queueServiceName = "queue" - fileServiceName = "file" - - storageEmulatorBlob = "127.0.0.1:10000" - storageEmulatorTable = "127.0.0.1:10002" - storageEmulatorQueue = "127.0.0.1:10001" - - userAgentHeader = "User-Agent" -) - -// Client is the object that needs to be constructed to perform -// operations on the storage account. -type Client struct { - // HTTPClient is the http.Client used to initiate API - // requests. If it is nil, http.DefaultClient is used. - HTTPClient *http.Client - - accountName string - accountKey []byte - useHTTPS bool - UseSharedKeyLite bool - baseURL string - apiVersion string - userAgent string -} - -type storageResponse struct { - statusCode int - headers http.Header - body io.ReadCloser -} - -type odataResponse struct { - storageResponse - odata odataErrorMessage -} - -// AzureStorageServiceError contains fields of the error response from -// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx -// Some fields might be specific to certain calls. -type AzureStorageServiceError struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` - QueryParameterName string `xml:"QueryParameterName"` - QueryParameterValue string `xml:"QueryParameterValue"` - Reason string `xml:"Reason"` - StatusCode int - RequestID string -} - -type odataErrorMessageMessage struct { - Lang string `json:"lang"` - Value string `json:"value"` -} - -type odataErrorMessageInternal struct { - Code string `json:"code"` - Message odataErrorMessageMessage `json:"message"` -} - -type odataErrorMessage struct { - Err odataErrorMessageInternal `json:"odata.error"` -} - -// UnexpectedStatusCodeError is returned when a storage service responds with neither an error -// nor with an HTTP status code indicating success. -type UnexpectedStatusCodeError struct { - allowed []int - got int -} - -func (e UnexpectedStatusCodeError) Error() string { - s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } - - got := s(e.got) - expected := []string{} - for _, v := range e.allowed { - expected = append(expected, s(v)) - } - return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) -} - -// Got is the actual status code returned by Azure. -func (e UnexpectedStatusCodeError) Got() int { - return e.got -} - -// NewBasicClient constructs a Client with given storage service name and -// key. -func NewBasicClient(accountName, accountKey string) (Client, error) { - if accountName == StorageEmulatorAccountName { - return NewEmulatorClient() - } - return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) -} - -// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and -// key in the referenced cloud. 
-func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) { - if accountName == StorageEmulatorAccountName { - return NewEmulatorClient() - } - return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS) -} - -//NewEmulatorClient contructs a Client intended to only work with Azure -//Storage Emulator -func NewEmulatorClient() (Client, error) { - return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false) -} - -// NewClient constructs a Client. This should be used if the caller wants -// to specify whether to use HTTPS, a specific REST API version or a custom -// storage endpoint than Azure Public Cloud. -func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { - var c Client - if accountName == "" { - return c, fmt.Errorf("azure: account name required") - } else if accountKey == "" { - return c, fmt.Errorf("azure: account key required") - } else if blobServiceBaseURL == "" { - return c, fmt.Errorf("azure: base storage service url required") - } - - key, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return c, fmt.Errorf("azure: malformed storage account key: %v", err) - } - - c = Client{ - accountName: accountName, - accountKey: key, - useHTTPS: useHTTPS, - baseURL: blobServiceBaseURL, - apiVersion: apiVersion, - UseSharedKeyLite: false, - } - c.userAgent = c.getDefaultUserAgent() - return c, nil -} - -func (c Client) getDefaultUserAgent() string { - return fmt.Sprintf("Go/%s (%s-%s) Azure-SDK-For-Go/%s storage-dataplane/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - sdkVersion, - c.apiVersion, - ) -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent) -} - -// protectUserAgent is used in funcs that include extraheaders as a parameter. -// It prevents the User-Agent header to be overwritten, instead if it happens to -// be present, it gets added to the current User-Agent. 
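// A hedged sketch of the client constructors above (placeholder credentials;
// assumes this vendored package is imported as `storage`). NewBasicClient
// targets the public cloud with the defaults, while NewClient spells out the
// endpoint suffix, API version and HTTPS choice explicitly.
func newClients() (storage.Client, error) {
	basic, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		return basic, err
	}
	// Optionally extend the User-Agent sent with every request.
	_ = basic.AddToUserAgent("my-tool/1.0") // hypothetical extension string

	// Equivalent explicit form, useful for sovereign clouds or pinned versions.
	return storage.NewClient("myaccount", "bXlrZXk=",
		storage.DefaultBaseURL, storage.DefaultAPIVersion, true)
}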
Use it before getStandardHeaders -func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string { - if v, ok := extraheaders[userAgentHeader]; ok { - c.AddToUserAgent(v) - delete(extraheaders, userAgentHeader) - } - return extraheaders -} - -func (c Client) getBaseURL(service string) string { - scheme := "http" - if c.useHTTPS { - scheme = "https" - } - host := "" - if c.accountName == StorageEmulatorAccountName { - switch service { - case blobServiceName: - host = storageEmulatorBlob - case tableServiceName: - host = storageEmulatorTable - case queueServiceName: - host = storageEmulatorQueue - } - } else { - host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) - } - - u := &url.URL{ - Scheme: scheme, - Host: host} - return u.String() -} - -func (c Client) getEndpoint(service, path string, params url.Values) string { - u, err := url.Parse(c.getBaseURL(service)) - if err != nil { - // really should not be happening - panic(err) - } - - // API doesn't accept path segments not starting with '/' - if !strings.HasPrefix(path, "/") { - path = fmt.Sprintf("/%v", path) - } - - if c.accountName == StorageEmulatorAccountName { - path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path) - } - - u.Path = path - u.RawQuery = params.Encode() - return u.String() -} - -// GetBlobService returns a BlobStorageClient which can operate on the blob -// service of the storage account. -func (c Client) GetBlobService() BlobStorageClient { - b := BlobStorageClient{ - client: c, - } - b.client.AddToUserAgent(blobServiceName) - b.auth = sharedKey - if c.UseSharedKeyLite { - b.auth = sharedKeyLite - } - return b -} - -// GetQueueService returns a QueueServiceClient which can operate on the queue -// service of the storage account. -func (c Client) GetQueueService() QueueServiceClient { - q := QueueServiceClient{ - client: c, - } - q.client.AddToUserAgent(queueServiceName) - q.auth = sharedKey - if c.UseSharedKeyLite { - q.auth = sharedKeyLite - } - return q -} - -// GetTableService returns a TableServiceClient which can operate on the table -// service of the storage account. -func (c Client) GetTableService() TableServiceClient { - t := TableServiceClient{ - client: c, - } - t.client.AddToUserAgent(tableServiceName) - t.auth = sharedKeyForTable - if c.UseSharedKeyLite { - t.auth = sharedKeyLiteForTable - } - return t -} - -// GetFileService returns a FileServiceClient which can operate on the file -// service of the storage account. -func (c Client) GetFileService() FileServiceClient { - f := FileServiceClient{ - client: c, - } - f.client.AddToUserAgent(fileServiceName) - f.auth = sharedKey - if c.UseSharedKeyLite { - f.auth = sharedKeyLite - } - return f -} - -func (c Client) getStandardHeaders() map[string]string { - return map[string]string{ - userAgentHeader: c.userAgent, - "x-ms-version": c.apiVersion, - "x-ms-date": currentTimeRfc1123Formatted(), - } -} - -func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) { - headers, err := c.addAuthorizationHeader(verb, url, headers, auth) - if err != nil { - return nil, err - } - - req, err := http.NewRequest(verb, url, body) - if err != nil { - return nil, errors.New("azure/storage: error creating request: " + err.Error()) - } - - if clstr, ok := headers["Content-Length"]; ok { - // content length header is being signed, but completely ignored by golang. 
- // instead we have to use the ContentLength property on the request struct - // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and - // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49) - req.ContentLength, err = strconv.ParseInt(clstr, 10, 64) - if err != nil { - return nil, err - } - } - for k, v := range headers { - req.Header.Add(k, v) - } - - httpClient := c.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient - } - resp, err := httpClient.Do(req) - if err != nil { - return nil, err - } - - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { - var respBody []byte - respBody, err = readAndCloseBody(resp.Body) - if err != nil { - return nil, err - } - - requestID := resp.Header.Get("x-ms-request-id") - if len(respBody) == 0 { - // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID) - } else { - // response contains storage service error object, unmarshal - storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, requestID) - if err != nil { // error unmarshaling the error response - err = errIn - } - err = storageErr - } - return &storageResponse{ - statusCode: resp.StatusCode, - headers: resp.Header, - body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */ - }, err - } - - return &storageResponse{ - statusCode: resp.StatusCode, - headers: resp.Header, - body: resp.Body}, nil -} - -func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { - headers, err := c.addAuthorizationHeader(verb, url, headers, auth) - if err != nil { - return nil, err - } - - req, err := http.NewRequest(verb, url, body) - for k, v := range headers { - req.Header.Add(k, v) - } - - httpClient := c.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient - } - - resp, err := httpClient.Do(req) - if err != nil { - return nil, err - } - - respToRet := &odataResponse{} - respToRet.body = resp.Body - respToRet.statusCode = resp.StatusCode - respToRet.headers = resp.Header - - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { - var respBody []byte - respBody, err = readAndCloseBody(resp.Body) - if err != nil { - return nil, err - } - - if len(respBody) == 0 { - // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, resp.Header.Get("x-ms-request-id")) - return respToRet, err - } - // try unmarshal as odata.error json - err = json.Unmarshal(respBody, &respToRet.odata) - return respToRet, err - } - - return respToRet, nil -} - -func readAndCloseBody(body io.ReadCloser) ([]byte, error) { - defer body.Close() - out, err := ioutil.ReadAll(body) - if err == io.EOF { - err = nil - } - return out, err -} - -func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) { - var storageErr AzureStorageServiceError - if err := xml.Unmarshal(body, &storageErr); err != nil { - return storageErr, err - } - storageErr.StatusCode = statusCode - storageErr.RequestID = requestID - return storageErr, nil -} - -func serviceErrFromStatusCode(code int, status string, requestID string) AzureStorageServiceError { - return AzureStorageServiceError{ - StatusCode: code, - Code: status, - RequestID: requestID, - Message: "no response body was available for error status code", - } -} - -func (e AzureStorageServiceError) Error() string { - return 
fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s", - e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue) -} - -// checkRespCode returns UnexpectedStatusError if the given response code is not -// one of the allowed status codes; otherwise nil. -func checkRespCode(respCode int, allowed []int) error { - for _, v := range allowed { - if respCode == v { - return nil - } - } - return UnexpectedStatusCodeError{allowed, respCode} -} diff --git a/vendor/github.com/Azure/azure-storage-go/container.go b/vendor/github.com/Azure/azure-storage-go/container.go deleted file mode 100644 index f064239674ef..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/container.go +++ /dev/null @@ -1,376 +0,0 @@ -package storage - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "time" -) - -// Container represents an Azure container. -type Container struct { - bsc *BlobStorageClient - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` -} - -func (c *Container) buildPath() string { - return fmt.Sprintf("/%s", c.Name) -} - -// ContainerProperties contains various properties of a container returned from -// various endpoints like ListContainers. -type ContainerProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` -} - -// ContainerListResponse contains the response fields from -// ListContainers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ContainerListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []Container `xml:"Containers>Container"` -} - -// BlobListResponse contains the response fields from ListBlobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type BlobListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Blobs []Blob `xml:"Blobs>Blob"` - - // BlobPrefix is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - // The list here can be thought of as "folders" that may contain - // other folders or blobs. - BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` - - // Delimiter is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - Delimiter string `xml:"Delimiter"` -} - -// ListBlobsParameters defines the set of customizable -// parameters to make a List Blobs call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type ListBlobsParameters struct { - Prefix string - Delimiter string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListBlobsParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Delimiter != "" { - out.Set("delimiter", p.Delimiter) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// ContainerAccessType defines the access level to the container from a public -// request. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- -// blob-public-access" header. -type ContainerAccessType string - -// Access options for containers -const ( - ContainerAccessTypePrivate ContainerAccessType = "" - ContainerAccessTypeBlob ContainerAccessType = "blob" - ContainerAccessTypeContainer ContainerAccessType = "container" -) - -// ContainerAccessPolicy represents each access policy in the container ACL. -type ContainerAccessPolicy struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanWrite bool - CanDelete bool -} - -// ContainerPermissions represents the container ACLs. -type ContainerPermissions struct { - AccessType ContainerAccessType - AccessPolicies []ContainerAccessPolicy -} - -// ContainerAccessHeader references header used when setting/getting container ACL -const ( - ContainerAccessHeader string = "x-ms-blob-public-access" -) - -// Create creates a blob container within the storage account -// with given name and access level. Returns error if container already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx -func (c *Container) Create() error { - resp, err := c.create() - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CreateIfNotExists creates a blob container if it does not exist. Returns -// true if container is newly created or false if container already exists. -func (c *Container) CreateIfNotExists() (bool, error) { - resp, err := c.create() - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -func (c *Container) create() (*storageResponse, error) { - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) - headers := c.bsc.client.getStandardHeaders() - return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) -} - -// Exists returns true if a container with given name exists -// on the storage account, otherwise returns false. 
-func (c *Container) Exists() (bool, error) { - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) - headers := c.bsc.client.getStandardHeaders() - - resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// SetPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx -func (c *Container) SetPermissions(permissions ContainerPermissions, timeout int, leaseID string) error { - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - headers := c.bsc.client.getStandardHeaders() - if permissions.AccessType != "" { - headers[ContainerAccessHeader] = string(permissions.AccessType) - } - - if leaseID != "" { - headers[headerLeaseID] = leaseID - } - - body, length, err := generateContainerACLpayload(permissions.AccessPolicies) - headers["Content-Length"] = strconv.Itoa(length) - - resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return errors.New("Unable to set permissions") - } - - return nil -} - -// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx -// If timeout is 0 then it will not be passed to Azure -// leaseID will only be passed to Azure if populated -func (c *Container) GetPermissions(timeout int, leaseID string) (*ContainerPermissions, error) { - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - headers := c.bsc.client.getStandardHeaders() - - if leaseID != "" { - headers[headerLeaseID] = leaseID - } - - resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - var ap AccessPolicy - err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - return buildAccessPolicy(ap, &resp.headers), nil -} - -func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions { - // containerAccess. Blob, Container, empty - containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) - permissions := ContainerPermissions{ - AccessType: ContainerAccessType(containerAccess), - AccessPolicies: []ContainerAccessPolicy{}, - } - - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - capd := ContainerAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w") - capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") - - permissions.AccessPolicies = append(permissions.AccessPolicies, capd) - } - return &permissions -} - -// Delete deletes the container with given name on the storage -// account. 
If the container does not exist returns error. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (c *Container) Delete() error { - resp, err := c.delete() - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteIfExists deletes the container with given name on the storage -// account if it exists. Returns true if container is deleted with this call, or -// false if the container did not exist at the time of the Delete Container -// operation. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (c *Container) DeleteIfExists() (bool, error) { - resp, err := c.delete() - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (c *Container) delete() (*storageResponse, error) { - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) - headers := c.bsc.client.getStandardHeaders() - return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth) -} - -// ListBlobs returns an object that contains list of blobs in the container, -// pagination token and other information in the response of List Blobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{ - "restype": {"container"}, - "comp": {"list"}}, - ) - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) - headers := c.bsc.client.getStandardHeaders() - - var out BlobListResponse - resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) { - sil := SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{}, - } - for _, capd := range policies { - permission := capd.generateContainerPermissions() - signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission) - sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) - } - return xmlMarshal(sil) -} - -func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) { - // generate the permissions string (rwd). - // still want the end user API to have bool flags. - permissions = "" - - if capd.CanRead { - permissions += "r" - } - - if capd.CanWrite { - permissions += "w" - } - - if capd.CanDelete { - permissions += "d" - } - - return permissions -} diff --git a/vendor/github.com/Azure/azure-storage-go/directory.go b/vendor/github.com/Azure/azure-storage-go/directory.go deleted file mode 100644 index d27e62079a18..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/directory.go +++ /dev/null @@ -1,217 +0,0 @@ -package storage - -import ( - "encoding/xml" - "net/http" - "net/url" -) - -// Directory represents a directory on a share. -type Directory struct { - fsc *FileServiceClient - Metadata map[string]string - Name string `xml:"Name"` - parent *Directory - Properties DirectoryProperties - share *Share -} - -// DirectoryProperties contains various properties of a directory. 
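// A brief sketch tying together the container helpers above: create a
// container if needed, list its blobs, then remove it. The container name is
// hypothetical; `storage` and "fmt" are assumed imports.
func containerLifecycle(blobCli storage.BlobStorageClient) error {
	cnt := blobCli.GetContainerReference("example-container")
	if _, err := cnt.CreateIfNotExists(); err != nil {
		return err
	}
	blobs, err := cnt.ListBlobs(storage.ListBlobsParameters{MaxResults: 100})
	if err != nil {
		return err
	}
	fmt.Printf("container holds %d blobs\n", len(blobs.Blobs))
	_, err = cnt.DeleteIfExists()
	return err
}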
-type DirectoryProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` -} - -// ListDirsAndFilesParameters defines the set of customizable parameters to -// make a List Files and Directories call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx -type ListDirsAndFilesParameters struct { - Marker string - MaxResults uint - Timeout uint -} - -// DirsAndFilesListResponse contains the response fields from -// a List Files and Directories call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx -type DirsAndFilesListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Marker string `xml:"Marker"` - MaxResults int64 `xml:"MaxResults"` - Directories []Directory `xml:"Entries>Directory"` - Files []File `xml:"Entries>File"` - NextMarker string `xml:"NextMarker"` -} - -// builds the complete directory path for this directory object. -func (d *Directory) buildPath() string { - path := "" - current := d - for current.Name != "" { - path = "/" + current.Name + path - current = current.parent - } - return d.share.buildPath() + path -} - -// Create this directory in the associated share. -// If a directory with the same name already exists, the operation fails. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx -func (d *Directory) Create() error { - // if this is the root directory exit early - if d.parent == nil { - return nil - } - - headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, nil, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated}) - if err != nil { - return err - } - - d.updateEtagAndLastModified(headers) - return nil -} - -// CreateIfNotExists creates this directory under the associated share if the -// directory does not exists. Returns true if the directory is newly created or -// false if the directory already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx -func (d *Directory) CreateIfNotExists() (bool, error) { - // if this is the root directory exit early - if d.parent == nil { - return false, nil - } - - resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil, nil) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - if resp.statusCode == http.StatusCreated { - d.updateEtagAndLastModified(resp.headers) - return true, nil - } - - return false, d.FetchAttributes() - } - } - - return false, err -} - -// Delete removes this directory. It must be empty in order to be deleted. -// If the directory does not exist the operation fails. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx -func (d *Directory) Delete() error { - return d.fsc.deleteResource(d.buildPath(), resourceDirectory) -} - -// DeleteIfExists removes this directory if it exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx -func (d *Directory) DeleteIfExists() (bool, error) { - resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// Exists returns true if this directory exists. 
-func (d *Directory) Exists() (bool, error) { - exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory) - if exists { - d.updateEtagAndLastModified(headers) - } - return exists, err -} - -// FetchAttributes retrieves metadata for this directory. -func (d *Directory) FetchAttributes() error { - headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, http.MethodHead) - if err != nil { - return err - } - - d.updateEtagAndLastModified(headers) - d.Metadata = getMetadataFromHeaders(headers) - - return nil -} - -// GetDirectoryReference returns a child Directory object for this directory. -func (d *Directory) GetDirectoryReference(name string) *Directory { - return &Directory{ - fsc: d.fsc, - Name: name, - parent: d, - share: d.share, - } -} - -// GetFileReference returns a child File object for this directory. -func (d *Directory) GetFileReference(name string) *File { - return &File{ - fsc: d.fsc, - Name: name, - parent: d, - share: d.share, - } -} - -// ListDirsAndFiles returns a list of files and directories under this directory. -// It also contains a pagination token and other response details. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx -func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) { - q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) - - resp, err := d.fsc.listContent(d.buildPath(), q, nil) - if err != nil { - return nil, err - } - - defer resp.body.Close() - var out DirsAndFilesListResponse - err = xmlUnmarshal(resp.body, &out) - return &out, err -} - -// SetMetadata replaces the metadata for this directory. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetDirectoryMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx -func (d *Directory) SetMetadata() error { - headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil)) - if err != nil { - return err - } - - d.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (d *Directory) updateEtagAndLastModified(headers http.Header) { - d.Properties.Etag = headers.Get("Etag") - d.Properties.LastModified = headers.Get("Last-Modified") -} - -// URL gets the canonical URL to this directory. -// This method does not create a publicly accessible URL if the directory -// is private and this method does not check if the directory exists. -func (d *Directory) URL() string { - return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{}) -} diff --git a/vendor/github.com/Azure/azure-storage-go/file.go b/vendor/github.com/Azure/azure-storage-go/file.go deleted file mode 100644 index e4901a1144f0..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/file.go +++ /dev/null @@ -1,412 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -const fourMB = uint64(4194304) -const oneTB = uint64(1099511627776) - -// File represents a file on a share. 
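// A short sketch of the directory helpers above, assuming the caller already
// holds a *storage.Directory (for example a share's root directory obtained
// via the share helpers defined elsewhere in this package); the subdirectory
// name is hypothetical and "fmt" is an assumed import.
func listLogs(root *storage.Directory) error {
	sub := root.GetDirectoryReference("logs")
	if _, err := sub.CreateIfNotExists(); err != nil {
		return err
	}
	listing, err := sub.ListDirsAndFiles(storage.ListDirsAndFilesParameters{MaxResults: 25})
	if err != nil {
		return err
	}
	for _, f := range listing.Files {
		fmt.Println(f.Name)
	}
	return nil
}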
-type File struct { - fsc *FileServiceClient - Metadata map[string]string - Name string `xml:"Name"` - parent *Directory - Properties FileProperties `xml:"Properties"` - share *Share - FileCopyProperties FileCopyState -} - -// FileProperties contains various properties of a file. -type FileProperties struct { - CacheControl string `header:"x-ms-cache-control"` - Disposition string `header:"x-ms-content-disposition"` - Encoding string `header:"x-ms-content-encoding"` - Etag string - Language string `header:"x-ms-content-language"` - LastModified string - Length uint64 `xml:"Content-Length"` - MD5 string `header:"x-ms-content-md5"` - Type string `header:"x-ms-content-type"` -} - -// FileCopyState contains various properties of a file copy operation. -type FileCopyState struct { - CompletionTime string - ID string `header:"x-ms-copy-id"` - Progress string - Source string - Status string `header:"x-ms-copy-status"` - StatusDesc string -} - -// FileStream contains file data returned from a call to GetFile. -type FileStream struct { - Body io.ReadCloser - ContentMD5 string -} - -// FileRequestOptions will be passed to misc file operations. -// Currently just Timeout (in seconds) but will expand. -type FileRequestOptions struct { - Timeout uint // timeout duration in seconds. -} - -// getParameters, construct parameters for FileRequestOptions. -// currently only timeout, but expecting to grow as functionality fills out. -func (p FileRequestOptions) getParameters() url.Values { - out := url.Values{} - - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// FileRanges contains a list of file range information for a file. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx -type FileRanges struct { - ContentLength uint64 - LastModified string - ETag string - FileRanges []FileRange `xml:"Range"` -} - -// FileRange contains range information for a file. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx -type FileRange struct { - Start uint64 `xml:"Start"` - End uint64 `xml:"End"` -} - -func (fr FileRange) String() string { - return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End) -} - -// builds the complete file path for this file object -func (f *File) buildPath() string { - return f.parent.buildPath() + "/" + f.Name -} - -// ClearRange releases the specified range of space in a file. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx -func (f *File) ClearRange(fileRange FileRange) error { - headers, err := f.modifyRange(nil, fileRange, nil) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} - -// Create creates a new file or replaces an existing one. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx -func (f *File) Create(maxSize uint64) error { - if maxSize > oneTB { - return fmt.Errorf("max file size is 1TB") - } - - extraHeaders := map[string]string{ - "x-ms-content-length": strconv.FormatUint(maxSize, 10), - "x-ms-type": "file", - } - - headers, err := f.fsc.createResource(f.buildPath(), resourceFile, nil, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated}) - if err != nil { - return err - } - - f.Properties.Length = maxSize - f.updateEtagAndLastModified(headers) - return nil -} - -// CopyFile operation copied a file/blob from the sourceURL to the path provided. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file -func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error { - extraHeaders := map[string]string{ - "x-ms-type": "file", - "x-ms-copy-source": sourceURL, - } - - var parameters url.Values - if options != nil { - parameters = options.getParameters() - } - - headers, err := f.fsc.createResource(f.buildPath(), resourceFile, parameters, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted}) - if err != nil { - return err - } - - f.updateEtagLastModifiedAndCopyHeaders(headers) - return nil -} - -// Delete immediately removes this file from the storage account. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx -func (f *File) Delete() error { - return f.fsc.deleteResource(f.buildPath(), resourceFile) -} - -// DeleteIfExists removes this file if it exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx -func (f *File) DeleteIfExists() (bool, error) { - resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f *File) DownloadRangeToStream(fileRange FileRange, getContentMD5 bool) (fs FileStream, err error) { - if getContentMD5 && isRangeTooBig(fileRange) { - return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") - } - - extraHeaders := map[string]string{ - "Range": fileRange.String(), - } - if getContentMD5 == true { - extraHeaders["x-ms-range-get-content-md5"] = "true" - } - - resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, http.MethodGet, extraHeaders) - if err != nil { - return fs, err - } - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil { - resp.body.Close() - return fs, err - } - - fs.Body = resp.body - if getContentMD5 { - fs.ContentMD5 = resp.headers.Get("Content-MD5") - } - return fs, nil -} - -// Exists returns true if this file exists. -func (f *File) Exists() (bool, error) { - exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile) - if exists { - f.updateEtagAndLastModified(headers) - f.updateProperties(headers) - } - return exists, err -} - -// FetchAttributes updates metadata and properties for this file. -func (f *File) FetchAttributes() error { - headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, http.MethodHead) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - f.updateProperties(headers) - f.Metadata = getMetadataFromHeaders(headers) - return nil -} - -// returns true if the range is larger than 4MB -func isRangeTooBig(fileRange FileRange) bool { - if fileRange.End-fileRange.Start > fourMB { - return true - } - - return false -} - -// ListRanges returns the list of valid ranges for this file. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx -func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) { - params := url.Values{"comp": {"rangelist"}} - - // add optional range to list - var headers map[string]string - if listRange != nil { - headers = make(map[string]string) - headers["Range"] = listRange.String() - } - - resp, err := f.fsc.listContent(f.buildPath(), params, headers) - if err != nil { - return nil, err - } - - defer resp.body.Close() - var cl uint64 - cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64) - if err != nil { - ioutil.ReadAll(resp.body) - return nil, err - } - - var out FileRanges - out.ContentLength = cl - out.ETag = resp.headers.Get("ETag") - out.LastModified = resp.headers.Get("Last-Modified") - - err = xmlUnmarshal(resp.body, &out) - return &out, err -} - -// modifies a range of bytes in this file -func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) (http.Header, error) { - if err := f.fsc.checkForStorageEmulator(); err != nil { - return nil, err - } - if fileRange.End < fileRange.Start { - return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart") - } - if bytes != nil && isRangeTooBig(fileRange) { - return nil, errors.New("range cannot exceed 4MB in size") - } - - uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{"comp": {"range"}}) - - // default to clear - write := "clear" - cl := uint64(0) - - // if bytes is not nil then this is an update operation - if bytes != nil { - write = "update" - cl = (fileRange.End - fileRange.Start) + 1 - } - - extraHeaders := map[string]string{ - "Content-Length": strconv.FormatUint(cl, 10), - "Range": fileRange.String(), - "x-ms-write": write, - } - - if contentMD5 != nil { - extraHeaders["Content-MD5"] = *contentMD5 - } - - headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders) - resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// SetMetadata replaces the metadata for this file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetFileMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx -func (f *File) SetMetadata() error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil)) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} - -// SetProperties sets system properties on this file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetFileProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
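// An illustrative sketch of the file range APIs above: create a fixed-size
// file and stream back its first (at most 4 MB) range with a Content-MD5
// check. The directory and file name are hypothetical; `storage`, "fmt",
// "io" and "io/ioutil" are assumed imports.
func readFirstRange(dir *storage.Directory) error {
	f := dir.GetFileReference("state.bin")
	if err := f.Create(8 * 1024 * 1024); err != nil { // reserve 8 MiB, limit is 1 TB
		return err
	}
	// Content-MD5 may only be requested for ranges of 4 MB or less.
	fs, err := f.DownloadRangeToStream(storage.FileRange{Start: 0, End: 4*1024*1024 - 1}, true)
	if err != nil {
		return err
	}
	defer fs.Body.Close()
	if _, err := io.Copy(ioutil.Discard, fs.Body); err != nil {
		return err
	}
	fmt.Println("range md5:", fs.ContentMD5)
	return nil
}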
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx -func (f *File) SetProperties() error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties)) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (f *File) updateEtagAndLastModified(headers http.Header) { - f.Properties.Etag = headers.Get("Etag") - f.Properties.LastModified = headers.Get("Last-Modified") -} - -// updates Etag, last modified date and x-ms-copy-id -func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) { - f.Properties.Etag = headers.Get("Etag") - f.Properties.LastModified = headers.Get("Last-Modified") - f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id") - f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status") -} - -// updates file properties from the specified HTTP header -func (f *File) updateProperties(header http.Header) { - size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) - if err == nil { - f.Properties.Length = size - } - - f.updateEtagAndLastModified(header) - f.Properties.CacheControl = header.Get("Cache-Control") - f.Properties.Disposition = header.Get("Content-Disposition") - f.Properties.Encoding = header.Get("Content-Encoding") - f.Properties.Language = header.Get("Content-Language") - f.Properties.MD5 = header.Get("Content-MD5") - f.Properties.Type = header.Get("Content-Type") -} - -// URL gets the canonical URL to this file. -// This method does not create a publicly accessible URL if the file -// is private and this method does not check if the file exists. -func (f *File) URL() string { - return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{}) -} - -// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content. -// Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx -func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) error { - if bytes == nil { - return errors.New("bytes cannot be nil") - } - - headers, err := f.modifyRange(bytes, fileRange, contentMD5) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} diff --git a/vendor/github.com/Azure/azure-storage-go/fileserviceclient.go b/vendor/github.com/Azure/azure-storage-go/fileserviceclient.go deleted file mode 100644 index d68bd7f64e98..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/fileserviceclient.go +++ /dev/null @@ -1,375 +0,0 @@ -package storage - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strings" -) - -// FileServiceClient contains operations for Microsoft Azure File Service. -type FileServiceClient struct { - client Client - auth authentication -} - -// ListSharesParameters defines the set of customizable parameters to make a -// List Shares call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx -type ListSharesParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -// ShareListResponse contains the response fields from -// ListShares call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx -type ShareListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Shares []Share `xml:"Shares>Share"` -} - -type compType string - -const ( - compNone compType = "" - compList compType = "list" - compMetadata compType = "metadata" - compProperties compType = "properties" - compRangeList compType = "rangelist" -) - -func (ct compType) String() string { - return string(ct) -} - -type resourceType string - -const ( - resourceDirectory resourceType = "directory" - resourceFile resourceType = "" - resourceShare resourceType = "share" -) - -func (rt resourceType) String() string { - return string(rt) -} - -func (p ListSharesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -func (p ListDirsAndFilesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// returns url.Values for the specified types -func getURLInitValues(comp compType, res resourceType) url.Values { - values := url.Values{} - if comp != compNone { - values.Set("comp", comp.String()) - } - if res != resourceFile { - values.Set("restype", res.String()) - } - return values -} - -// GetShareReference returns a Share object for the specified share name. -func (f FileServiceClient) GetShareReference(name string) Share { - return Share{ - fsc: &f, - Name: name, - Properties: ShareProperties{ - Quota: -1, - }, - } -} - -// ListShares returns the list of shares in a storage account along with -// pagination token and other response details. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - - var out ShareListResponse - resp, err := f.listContent("", q, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) - - // assign our client to the newly created Share objects - for i := range out.Shares { - out.Shares[i].fsc = &f - } - return &out, err -} - -// GetServiceProperties gets the properties of your storage account's file service. -// File service does not support logging -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties -func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return f.client.getServiceProperties(fileServiceName, f.auth) -} - -// SetServiceProperties sets the properties of your storage account's file service. 
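// A compact sketch of the share-listing API above (the file service is not
// available against the storage emulator account); `storage` and "fmt" are
// assumed imports.
func printShares(cli storage.Client) error {
	fileCli := cli.GetFileService()
	shares, err := fileCli.ListShares(storage.ListSharesParameters{MaxResults: 20})
	if err != nil {
		return err
	}
	for _, s := range shares.Shares {
		fmt.Println(s.Name)
	}
	return nil
}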
-// File service does not support logging -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties -func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error { - return f.client.setServiceProperties(props, fileServiceName, f.auth) -} - -// retrieves directory or share content -func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth) - if err != nil { - return nil, err - } - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - readAndCloseBody(resp.body) - return nil, err - } - - return resp, nil -} - -// returns true if the specified resource exists -func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) { - if err := f.checkForStorageEmulator(); err != nil { - return false, nil, err - } - - uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) - headers := f.client.getStandardHeaders() - - resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, resp.headers, nil - } - } - return false, nil, err -} - -// creates a resource depending on the specified resource type -func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) { - resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes) -} - -// creates a resource depending on the specified resource type, doesn't close the response body -func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - values := getURLInitValues(compNone, res) - combinedParams := mergeParams(values, urlParams) - uri := f.client.getEndpoint(fileServiceName, path, combinedParams) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(http.MethodPut, uri, headers, nil, f.auth) -} - -// returns HTTP header data for the specified directory or share -func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) { - resp, err := f.getResourceNoClose(path, comp, res, verb, nil) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - return resp.headers, nil -} - -// gets the specified resource, doesn't close the response body -func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) 
(*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - params := getURLInitValues(comp, res) - uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(verb, uri, headers, nil, f.auth) -} - -// deletes the resource and returns the response -func (f FileServiceClient) deleteResource(path string, res resourceType) error { - resp, err := f.deleteResourceNoClose(path, res) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// deletes the resource and returns the response, doesn't close the response body -func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - values := getURLInitValues(compNone, res) - uri := f.client.getEndpoint(fileServiceName, path, values) - return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) -} - -// merges metadata into extraHeaders and returns extraHeaders -func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { - if metadata == nil && extraHeaders == nil { - return nil - } - if extraHeaders == nil { - extraHeaders = make(map[string]string) - } - for k, v := range metadata { - extraHeaders[userDefinedMetadataHeaderPrefix+k] = v - } - return extraHeaders -} - -// merges extraHeaders into headers and returns headers -func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { - for k, v := range extraHeaders { - headers[k] = v - } - return headers -} - -// sets extra header data for the specified resource -func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) (http.Header, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - params := getURLInitValues(comp, res) - uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - - return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// gets metadata for the specified resource -func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet) - if err != nil { - return nil, err - } - - return getMetadataFromHeaders(headers), nil -} - -// returns a map of custom metadata values from the specified HTTP header -func getMetadataFromHeaders(header http.Header) map[string]string { - metadata := make(map[string]string) - for k, v := range header { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. 
"_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["foo"] = content of the last X-Ms-Meta-Foo header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - - if len(metadata) == 0 { - return nil - } - - return metadata -} - -//checkForStorageEmulator determines if the client is setup for use with -//Azure Storage Emulator, and returns a relevant error -func (f FileServiceClient) checkForStorageEmulator() error { - if f.client.accountName == StorageEmulatorAccountName { - return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") - } - return nil -} diff --git a/vendor/github.com/Azure/azure-storage-go/glide.lock b/vendor/github.com/Azure/azure-storage-go/glide.lock deleted file mode 100644 index 5d3ce83618a8..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/glide.lock +++ /dev/null @@ -1,14 +0,0 @@ -hash: a97c0c90fe4d23bbd8e5745431f633e75530bb611131b786d76b8e1763bce85e -updated: 2017-02-23T09:58:57.3701584-08:00 -imports: -- name: github.com/Azure/go-autorest - version: ec5f4903f77ed9927ac95b19ab8e44ada64c1356 - subpackages: - - autorest/azure - - autorest - - autorest/date -- name: github.com/dgrijalva/jwt-go - version: 2268707a8f0843315e2004ee4f1d021dc08baedf -testImports: -- name: gopkg.in/check.v1 - version: 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec diff --git a/vendor/github.com/Azure/azure-storage-go/glide.yaml b/vendor/github.com/Azure/azure-storage-go/glide.yaml deleted file mode 100644 index e6783b77410b..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/glide.yaml +++ /dev/null @@ -1,4 +0,0 @@ -package: github.com/Azure/azure-sdk-for-go-storage -import: [] -testImport: -- package: gopkg.in/check.v1 diff --git a/vendor/github.com/Azure/azure-storage-go/queue.go b/vendor/github.com/Azure/azure-storage-go/queue.go deleted file mode 100644 index 4031410aebfc..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/queue.go +++ /dev/null @@ -1,339 +0,0 @@ -package storage - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" -) - -const ( - // casing is per Golang's http.Header canonicalizing the header names. - approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" - userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" -) - -func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } -func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } -func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } - -type putMessageRequest struct { - XMLName xml.Name `xml:"QueueMessage"` - MessageText string `xml:"MessageText"` -} - -// PutMessageParameters is the set of options can be specified for Put Messsage -// operation. A zero struct does not use any preferences for the request. 
-type PutMessageParameters struct { - VisibilityTimeout int - MessageTTL int -} - -func (p PutMessageParameters) getParameters() url.Values { - out := url.Values{} - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - if p.MessageTTL != 0 { - out.Set("messagettl", strconv.Itoa(p.MessageTTL)) - } - return out -} - -// GetMessagesParameters is the set of options can be specified for Get -// Messsages operation. A zero struct does not use any preferences for the -// request. -type GetMessagesParameters struct { - NumOfMessages int - VisibilityTimeout int -} - -func (p GetMessagesParameters) getParameters() url.Values { - out := url.Values{} - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - return out -} - -// PeekMessagesParameters is the set of options can be specified for Peek -// Messsage operation. A zero struct does not use any preferences for the -// request. -type PeekMessagesParameters struct { - NumOfMessages int -} - -func (p PeekMessagesParameters) getParameters() url.Values { - out := url.Values{"peekonly": {"true"}} // Required for peek operation - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - return out -} - -// UpdateMessageParameters is the set of options can be specified for Update Messsage -// operation. A zero struct does not use any preferences for the request. -type UpdateMessageParameters struct { - PopReceipt string - VisibilityTimeout int -} - -func (p UpdateMessageParameters) getParameters() url.Values { - out := url.Values{} - if p.PopReceipt != "" { - out.Set("popreceipt", p.PopReceipt) - } - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - return out -} - -// GetMessagesResponse represents a response returned from Get Messages -// operation. -type GetMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` -} - -// GetMessageResponse represents a QueueMessage object returned from Get -// Messages operation response. -type GetMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - PopReceipt string `xml:"PopReceipt"` - TimeNextVisible string `xml:"TimeNextVisible"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// PeekMessagesResponse represents a response returned from Get Messages -// operation. -type PeekMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` -} - -// PeekMessageResponse represents a QueueMessage object returned from Peek -// Messages operation response. -type PeekMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// QueueMetadataResponse represents user defined metadata and queue -// properties on a specific queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx -type QueueMetadataResponse struct { - ApproximateMessageCount int - UserDefinedMetadata map[string]string -} - -// SetMetadata operation sets user-defined metadata on the specified queue. 
-// Metadata is associated with the queue as name-value pairs. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx -func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) - metadata = c.client.protectUserAgent(metadata) - headers := c.client.getStandardHeaders() - for k, v := range metadata { - headers[userDefinedMetadataHeaderPrefix+k] = v - } - - resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// GetMetadata operation retrieves user-defined metadata and queue -// properties on the specified queue. Metadata is associated with -// the queue as name-values pairs. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx -// -// Because the way Golang's http client (and http.Header in particular) -// canonicalize header names, the returned metadata names would always -// be all lower case. -func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) { - qm := QueueMetadataResponse{} - qm.UserDefinedMetadata = make(map[string]string) - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) - headers := c.client.getStandardHeaders() - resp, err := c.client.exec(http.MethodGet, uri, headers, nil, c.auth) - if err != nil { - return qm, err - } - defer readAndCloseBody(resp.body) - - for k, v := range resp.headers { - if len(v) != 1 { - return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k) - } - - value := v[0] - - if k == approximateMessagesCountHeader { - qm.ApproximateMessageCount, err = strconv.Atoi(value) - if err != nil { - return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value) - } - } else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) { - name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix) - qm.UserDefinedMetadata[strings.ToLower(name)] = value - } - } - - return qm, checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// CreateQueue operation creates a queue under the given account. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx -func (c QueueServiceClient) CreateQueue(name string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) - headers := c.client.getStandardHeaders() - resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// DeleteQueue operation permanently deletes the specified queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx -func (c QueueServiceClient) DeleteQueue(name string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) - resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// QueueExists returns true if a queue with given name exists. 
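For reference, a minimal sketch of how the queue-level calls removed in this hunk were typically driven. NewBasicClient and GetQueueService are assumptions based on the rest of the deleted package (they are not shown here); CreateQueue, SetMetadata and GetMetadata are the methods above:

package main

import (
	"fmt"
	"log"

	storage "github.com/Azure/azure-storage-go"
)

func main() {
	// NewBasicClient and GetQueueService are assumed helpers from the rest of
	// the package; the queue-level calls below are the ones deleted above.
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	qs := cli.GetQueueService()

	// CreateQueue issues a PUT and expects 201 Created.
	if err := qs.CreateQueue("tasks"); err != nil {
		log.Fatal(err)
	}
	// SetMetadata sends each pair as an X-Ms-Meta-* header (expects 204).
	if err := qs.SetMetadata("tasks", map[string]string{"owner": "ci"}); err != nil {
		log.Fatal(err)
	}
	// GetMetadata returns lower-cased metadata keys plus the approximate count.
	meta, err := qs.GetMetadata("tasks")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(meta.ApproximateMessageCount, meta.UserDefinedMetadata["owner"])
}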
-func (c QueueServiceClient) QueueExists(name string) (bool, error) { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) - resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth) - if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { - return resp.statusCode == http.StatusOK, nil - } - - return false, err -} - -// PutMessage operation adds a new message to the back of the message queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx -func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - req := putMessageRequest{MessageText: message} - body, nn, err := xmlMarshal(req) - if err != nil { - return err - } - headers := c.client.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(nn) - resp, err := c.client.exec(http.MethodPost, uri, headers, body, c.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// ClearMessages operation deletes all messages from the specified queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx -func (c QueueServiceClient) ClearMessages(queue string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) - resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// GetMessages operation retrieves one or more messages from the front of the -// queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx -func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) { - var r GetMessagesResponse - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth) - if err != nil { - return r, err - } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &r) - return r, err -} - -// PeekMessages retrieves one or more messages from the front of the queue, but -// does not alter the visibility of the message. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx -func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) { - var r PeekMessagesResponse - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth) - if err != nil { - return r, err - } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &r) - return r, err -} - -// DeleteMessage operation deletes the specified message. 
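The message-level API pairs GetMessages with DeleteMessage through the pop receipt. A hedged sketch, assuming a QueueServiceClient obtained as in the previous example:

package example

import storage "github.com/Azure/azure-storage-go"

// drainOne enqueues a message, reads it back with a visibility timeout, and
// deletes it using the pop receipt returned by GetMessages. qs is assumed to
// be a QueueServiceClient obtained as in the previous sketch.
func drainOne(qs storage.QueueServiceClient, queue string) error {
	if err := qs.PutMessage(queue, "hello", storage.PutMessageParameters{MessageTTL: 3600}); err != nil {
		return err
	}
	resp, err := qs.GetMessages(queue, storage.GetMessagesParameters{
		NumOfMessages:     1,
		VisibilityTimeout: 30, // hide the message for 30s while it is handled
	})
	if err != nil {
		return err
	}
	for _, m := range resp.QueueMessagesList {
		if err := qs.DeleteMessage(queue, m.MessageID, m.PopReceipt); err != nil {
			return err
		}
	}
	return nil
}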
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx -func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { - uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ - "popreceipt": {popReceipt}}) - resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// UpdateMessage operation deletes the specified message. -// -// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx -func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error { - uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters()) - req := putMessageRequest{MessageText: message} - body, nn, err := xmlMarshal(req) - if err != nil { - return err - } - headers := c.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%d", nn) - resp, err := c.client.exec(http.MethodPut, uri, headers, body, c.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} diff --git a/vendor/github.com/Azure/azure-storage-go/queueserviceclient.go b/vendor/github.com/Azure/azure-storage-go/queueserviceclient.go deleted file mode 100644 index c26141339b73..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/queueserviceclient.go +++ /dev/null @@ -1,20 +0,0 @@ -package storage - -// QueueServiceClient contains operations for Microsoft Azure Queue Storage -// Service. -type QueueServiceClient struct { - client Client - auth authentication -} - -// GetServiceProperties gets the properties of your storage account's queue service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties -func (c *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return c.client.getServiceProperties(queueServiceName, c.auth) -} - -// SetServiceProperties sets the properties of your storage account's queue service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties -func (c *QueueServiceClient) SetServiceProperties(props ServiceProperties) error { - return c.client.setServiceProperties(props, queueServiceName, c.auth) -} diff --git a/vendor/github.com/Azure/azure-storage-go/share.go b/vendor/github.com/Azure/azure-storage-go/share.go deleted file mode 100644 index e190097ea5cd..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/share.go +++ /dev/null @@ -1,186 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "net/url" - "strconv" -) - -// Share represents an Azure file share. -type Share struct { - fsc *FileServiceClient - Name string `xml:"Name"` - Properties ShareProperties `xml:"Properties"` - Metadata map[string]string -} - -// ShareProperties contains various properties of a share. -type ShareProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - Quota int `xml:"Quota"` -} - -// builds the complete path for this share object. -func (s *Share) buildPath() string { - return fmt.Sprintf("/%s", s.Name) -} - -// Create this share under the associated account. -// If a share with the same name already exists, the operation fails. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (s *Share) Create() error { - headers, err := s.fsc.createResource(s.buildPath(), resourceShare, nil, mergeMDIntoExtraHeaders(s.Metadata, nil), []int{http.StatusCreated}) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// CreateIfNotExists creates this share under the associated account if -// it does not exist. Returns true if the share is newly created or false if -// the share already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (s *Share) CreateIfNotExists() (bool, error) { - resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil, nil) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - if resp.statusCode == http.StatusCreated { - s.updateEtagAndLastModified(resp.headers) - return true, nil - } - return false, s.FetchAttributes() - } - } - - return false, err -} - -// Delete marks this share for deletion. The share along with any files -// and directories contained within it are later deleted during garbage -// collection. If the share does not exist the operation fails -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (s *Share) Delete() error { - return s.fsc.deleteResource(s.buildPath(), resourceShare) -} - -// DeleteIfExists operation marks this share for deletion if it exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (s *Share) DeleteIfExists() (bool, error) { - resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare) - if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// Exists returns true if this share already exists -// on the storage account, otherwise returns false. -func (s *Share) Exists() (bool, error) { - exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare) - if exists { - s.updateEtagAndLastModified(headers) - s.updateQuota(headers) - } - return exists, err -} - -// FetchAttributes retrieves metadata and properties for this share. -func (s *Share) FetchAttributes() error { - headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, http.MethodHead) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - s.updateQuota(headers) - s.Metadata = getMetadataFromHeaders(headers) - - return nil -} - -// GetRootDirectoryReference returns a Directory object at the root of this share. -func (s *Share) GetRootDirectoryReference() *Directory { - return &Directory{ - fsc: s.fsc, - share: s, - } -} - -// ServiceClient returns the FileServiceClient associated with this share. -func (s *Share) ServiceClient() *FileServiceClient { - return s.fsc -} - -// SetMetadata replaces the metadata for this share. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetShareMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
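The Share type above exposes a small lifecycle API. A minimal sketch, assuming the caller already holds a *Share obtained from the FileServiceClient (that accessor is outside this excerpt); CreateIfNotExists and SetProperties are the methods shown in this hunk:

package example

import storage "github.com/Azure/azure-storage-go"

// ensureShare creates the share if it is missing and then sets its quota.
// The *Share value itself is assumed to come from the file service client.
func ensureShare(s *storage.Share, quotaGB int) error {
	if _, err := s.CreateIfNotExists(); err != nil {
		return err
	}
	s.Properties.Quota = quotaGB // SetProperties rejects values outside [1, 5120]
	return s.SetProperties()
}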
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (s *Share) SetMetadata() error { - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil)) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// SetProperties sets system properties for this share. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetShareProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx -func (s *Share) SetProperties() error { - if s.Properties.Quota < 1 || s.Properties.Quota > 5120 { - return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) - } - - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, map[string]string{ - "x-ms-share-quota": strconv.Itoa(s.Properties.Quota), - }) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (s *Share) updateEtagAndLastModified(headers http.Header) { - s.Properties.Etag = headers.Get("Etag") - s.Properties.LastModified = headers.Get("Last-Modified") -} - -// updates quota value -func (s *Share) updateQuota(headers http.Header) { - quota, err := strconv.Atoi(headers.Get("x-ms-share-quota")) - if err == nil { - s.Properties.Quota = quota - } -} - -// URL gets the canonical URL to this share. This method does not create a publicly accessible -// URL if the share is private and this method does not check if the share exists. -func (s *Share) URL() string { - return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{}) -} diff --git a/vendor/github.com/Azure/azure-storage-go/storagepolicy.go b/vendor/github.com/Azure/azure-storage-go/storagepolicy.go deleted file mode 100644 index bee1c31ad61f..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/storagepolicy.go +++ /dev/null @@ -1,47 +0,0 @@ -package storage - -import ( - "strings" - "time" -) - -// AccessPolicyDetailsXML has specifics about an access policy -// annotated with XML details. -type AccessPolicyDetailsXML struct { - StartTime time.Time `xml:"Start"` - ExpiryTime time.Time `xml:"Expiry"` - Permission string `xml:"Permission"` -} - -// SignedIdentifier is a wrapper for a specific policy -type SignedIdentifier struct { - ID string `xml:"Id"` - AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` -} - -// SignedIdentifiers part of the response from GetPermissions call. -type SignedIdentifiers struct { - SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` -} - -// AccessPolicy is the response type from the GetPermissions call. -type AccessPolicy struct { - SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` -} - -// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the -// AccessPolicy struct which will get converted to XML. 
-func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier { - return SignedIdentifier{ - ID: id, - AccessPolicy: AccessPolicyDetailsXML{ - StartTime: startTime.UTC().Round(time.Second), - ExpiryTime: expiryTime.UTC().Round(time.Second), - Permission: permissions, - }, - } -} - -func updatePermissions(permissions, permission string) bool { - return strings.Contains(permissions, permission) -} diff --git a/vendor/github.com/Azure/azure-storage-go/storageservice.go b/vendor/github.com/Azure/azure-storage-go/storageservice.go deleted file mode 100644 index 817560b782bf..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/storageservice.go +++ /dev/null @@ -1,118 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "net/url" -) - -// ServiceProperties represents the storage account service properties -type ServiceProperties struct { - Logging *Logging - HourMetrics *Metrics - MinuteMetrics *Metrics - Cors *Cors -} - -// Logging represents the Azure Analytics Logging settings -type Logging struct { - Version string - Delete bool - Read bool - Write bool - RetentionPolicy *RetentionPolicy -} - -// RetentionPolicy indicates if retention is enabled and for how many days -type RetentionPolicy struct { - Enabled bool - Days *int -} - -// Metrics provide request statistics. -type Metrics struct { - Version string - Enabled bool - IncludeAPIs *bool - RetentionPolicy *RetentionPolicy -} - -// Cors includes all the CORS rules -type Cors struct { - CorsRule []CorsRule -} - -// CorsRule includes all settings for a Cors rule -type CorsRule struct { - AllowedOrigins string - AllowedMethods string - MaxAgeInSeconds int - ExposedHeaders string - AllowedHeaders string -} - -func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) { - query := url.Values{ - "restype": {"service"}, - "comp": {"properties"}, - } - uri := c.getEndpoint(service, "", query) - headers := c.getStandardHeaders() - - resp, err := c.exec(http.MethodGet, uri, headers, nil, auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - var out ServiceProperties - err = xmlUnmarshal(resp.body, &out) - if err != nil { - return nil, err - } - - return &out, nil -} - -func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error { - query := url.Values{ - "restype": {"service"}, - "comp": {"properties"}, - } - uri := c.getEndpoint(service, "", query) - - // Ideally, StorageServiceProperties would be the output struct - // This is to avoid golint stuttering, while generating the correct XML - type StorageServiceProperties struct { - Logging *Logging - HourMetrics *Metrics - MinuteMetrics *Metrics - Cors *Cors - } - input := StorageServiceProperties{ - Logging: props.Logging, - HourMetrics: props.HourMetrics, - MinuteMetrics: props.MinuteMetrics, - Cors: props.Cors, - } - - body, length, err := xmlMarshal(input) - if err != nil { - return err - } - - headers := c.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", length) - - resp, err := c.exec(http.MethodPut, uri, headers, body, auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} diff --git a/vendor/github.com/Azure/azure-storage-go/table.go 
b/vendor/github.com/Azure/azure-storage-go/table.go deleted file mode 100644 index 4123746e501d..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/table.go +++ /dev/null @@ -1,254 +0,0 @@ -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "time" -) - -// AzureTable is the typedef of the Azure Table name -type AzureTable string - -const ( - tablesURIPath = "/Tables" -) - -type createTableRequest struct { - TableName string `json:"TableName"` -} - -// TableAccessPolicy are used for SETTING table policies -type TableAccessPolicy struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanAppend bool - CanUpdate bool - CanDelete bool -} - -func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) } - -func (c *TableServiceClient) getStandardHeaders() map[string]string { - return map[string]string{ - "x-ms-version": "2015-02-21", - "x-ms-date": currentTimeRfc1123Formatted(), - "Accept": "application/json;odata=nometadata", - "Accept-Charset": "UTF-8", - "Content-Type": "application/json", - userAgentHeader: c.client.userAgent, - } -} - -// QueryTables returns the tables created in the -// *TableServiceClient storage account. -func (c *TableServiceClient) QueryTables() ([]AzureTable, error) { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) - - headers := c.getStandardHeaders() - headers["Content-Length"] = "0" - - resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - ioutil.ReadAll(resp.body) - return nil, err - } - - buf := new(bytes.Buffer) - if _, err := buf.ReadFrom(resp.body); err != nil { - return nil, err - } - - var respArray queryTablesResponse - if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil { - return nil, err - } - - s := make([]AzureTable, len(respArray.TableName)) - for i, elem := range respArray.TableName { - s[i] = AzureTable(elem.TableName) - } - - return s, nil -} - -// CreateTable creates the table given the specific -// name. This function fails if the name is not compliant -// with the specification or the tables already exists. -func (c *TableServiceClient) CreateTable(table AzureTable) error { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) - - headers := c.getStandardHeaders() - - req := createTableRequest{TableName: string(table)} - buf := new(bytes.Buffer) - - if err := json.NewEncoder(buf).Encode(req); err != nil { - return err - } - - headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - - resp, err := c.client.execInternalJSON(http.MethodPost, uri, headers, buf, c.auth) - - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { - return err - } - - return nil -} - -// DeleteTable deletes the table given the specific -// name. This function fails if the table is not present. -// Be advised: DeleteTable deletes all the entries -// that may be present. 
-func (c *TableServiceClient) DeleteTable(table AzureTable) error { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) - uri += fmt.Sprintf("('%s')", string(table)) - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - - resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth) - - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - - } - return nil -} - -// SetTablePermissions sets up table ACL permissions as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Table-ACL -func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []TableAccessPolicy, timeout uint) (err error) { - params := url.Values{"comp": {"acl"}} - - if timeout > 0 { - params.Add("timeout", fmt.Sprint(timeout)) - } - - uri := c.client.getEndpoint(tableServiceName, string(table), params) - headers := c.client.getStandardHeaders() - - body, length, err := generateTableACLPayload(policies) - if err != nil { - return err - } - headers["Content-Length"] = fmt.Sprintf("%v", length) - - resp, err := c.client.execInternalJSON(http.MethodPut, uri, headers, body, c.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - return nil -} - -func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { - sil := SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{}, - } - for _, tap := range policies { - permission := generateTablePermissions(&tap) - signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission) - sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) - } - return xmlMarshal(sil) -} - -// GetTablePermissions gets the table ACL permissions, as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-acl -func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) (permissionResponse []TableAccessPolicy, err error) { - params := url.Values{"comp": {"acl"}} - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := c.client.getEndpoint(tableServiceName, string(table), params) - headers := c.client.getStandardHeaders() - resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - ioutil.ReadAll(resp.body) - return nil, err - } - - var ap AccessPolicy - err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - out := updateTableAccessPolicy(ap) - return out, nil -} - -func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { - out := []TableAccessPolicy{} - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - tap := TableAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a") - tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") - tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") - - out = 
append(out, tap) - } - return out -} - -func generateTablePermissions(tap *TableAccessPolicy) (permissions string) { - // generate the permissions string (raud). - // still want the end user API to have bool flags. - permissions = "" - - if tap.CanRead { - permissions += "r" - } - - if tap.CanAppend { - permissions += "a" - } - - if tap.CanUpdate { - permissions += "u" - } - - if tap.CanDelete { - permissions += "d" - } - return permissions -} diff --git a/vendor/github.com/Azure/azure-storage-go/table_entities.go b/vendor/github.com/Azure/azure-storage-go/table_entities.go deleted file mode 100644 index 36413a0cffd1..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/table_entities.go +++ /dev/null @@ -1,354 +0,0 @@ -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "reflect" -) - -// Annotating as secure for gas scanning -/* #nosec */ -const ( - partitionKeyNode = "PartitionKey" - rowKeyNode = "RowKey" - tag = "table" - tagIgnore = "-" - continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey" - continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey" - maxTopParameter = 1000 -) - -type queryTablesResponse struct { - TableName []struct { - TableName string `json:"TableName"` - } `json:"value"` -} - -const ( - tableOperationTypeInsert = iota - tableOperationTypeUpdate = iota - tableOperationTypeMerge = iota - tableOperationTypeInsertOrReplace = iota - tableOperationTypeInsertOrMerge = iota -) - -type tableOperation int - -// TableEntity interface specifies -// the functions needed to support -// marshaling and unmarshaling into -// Azure Tables. The struct must only contain -// simple types because Azure Tables do not -// support hierarchy. -type TableEntity interface { - PartitionKey() string - RowKey() string - SetPartitionKey(string) error - SetRowKey(string) error -} - -// ContinuationToken is an opaque (ie not useful to inspect) -// struct that Get... methods can return if there are more -// entries to be returned than the ones already -// returned. Just pass it to the same function to continue -// receiving the remaining entries. -type ContinuationToken struct { - NextPartitionKey string - NextRowKey string -} - -type getTableEntriesResponse struct { - Elements []map[string]interface{} `json:"value"` -} - -// QueryTableEntities queries the specified table and returns the unmarshaled -// entities of type retType. -// top parameter limits the returned entries up to top. Maximum top -// allowed by Azure API is 1000. In case there are more than top entries to be -// returned the function will return a non nil *ContinuationToken. You can call the -// same function again passing the received ContinuationToken as previousContToken -// parameter in order to get the following entries. The query parameter -// is the odata query. To retrieve all the entries pass the empty string. -// The function returns a pointer to a TableEntity slice, the *ContinuationToken -// if there are more entries to be returned and an error in case something went -// wrong. -// -// Example: -// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "") -func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) { - if top > maxTopParameter { - return nil, nil, fmt.Errorf("top accepts at maximum %d elements. 
Requested %d instead", maxTopParameter, top) - } - - uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{}) - uri += fmt.Sprintf("?$top=%d", top) - if query != "" { - uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query)) - } - - if previousContToken != nil { - uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey) - } - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - - resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) - - if err != nil { - return nil, nil, err - } - - contToken := extractContinuationTokenFromHeaders(resp.headers) - - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, contToken, err - } - - retEntries, err := deserializeEntity(retType, resp.body) - if err != nil { - return nil, contToken, err - } - - return retEntries, contToken, nil -} - -// InsertEntity inserts an entity in the specified table. -// The function fails if there is an entity with the same -// PartitionKey and RowKey in the table. -func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error { - sc, err := c.execTable(table, entity, false, http.MethodPost) - if err != nil { - return err - } - - return checkRespCode(sc, []int{http.StatusCreated}) -} - -func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) { - uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) - if specifyKeysInURL { - uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) - } - - headers := c.getStandardHeaders() - - var buf bytes.Buffer - - if err := injectPartitionAndRowKeys(entity, &buf); err != nil { - return 0, err - } - - headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - - resp, err := c.client.execInternalJSON(method, uri, headers, &buf, c.auth) - - if err != nil { - return 0, err - } - - defer resp.body.Close() - - return resp.statusCode, nil -} - -// UpdateEntity updates the contents of an entity with the -// one passed as parameter. The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error { - sc, err := c.execTable(table, entity, true, http.MethodPut) - if err != nil { - return err - } - - return checkRespCode(sc, []int{http.StatusNoContent}) -} - -// MergeEntity merges the contents of an entity with the -// one passed as parameter. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error { - sc, err := c.execTable(table, entity, true, "MERGE") - if err != nil { - return err - } - - return checkRespCode(sc, []int{http.StatusNoContent}) -} - -// DeleteEntityWithoutCheck deletes the entity matching by -// PartitionKey and RowKey. There is no check on IfMatch -// parameter so the entity is always deleted. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error { - return c.DeleteEntity(table, entity, "*") -} - -// DeleteEntity deletes the entity matching by -// PartitionKey, RowKey and ifMatch field. 
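Because TableEntity is an interface, callers of the table client above must supply their own concrete type. A hypothetical implementation using the table:"-" tag that injectPartitionAndRowKeys (further below) strips, so the keys are not serialized twice:

package example

import storage "github.com/Azure/azure-storage-go"

// Task is a hypothetical entity satisfying the TableEntity interface above.
// The table:"-" tag marks the key fields so they are removed from the JSON
// body and re-injected as PartitionKey/RowKey by the client.
type Task struct {
	Partition string `json:"partition" table:"-"`
	Row       string `json:"row" table:"-"`
	Payload   string `json:"payload"`
}

func (t *Task) PartitionKey() string           { return t.Partition }
func (t *Task) RowKey() string                 { return t.Row }
func (t *Task) SetPartitionKey(s string) error { t.Partition = s; return nil }
func (t *Task) SetRowKey(s string) error       { t.Row = s; return nil }

// insertTask writes one entity; ts is a *TableServiceClient obtained from the
// storage Client (its accessor is not part of this excerpt).
func insertTask(ts *storage.TableServiceClient, table storage.AzureTable, t *Task) error {
	return ts.InsertEntity(table, t)
}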
-// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table or -// the ifMatch is different. -func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error { - uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) - uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - headers["If-Match"] = ifMatch - - resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth) - - if err != nil { - return err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - - return nil -} - -// InsertOrReplaceEntity inserts an entity in the specified table -// or replaced the existing one. -func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error { - sc, err := c.execTable(table, entity, true, http.MethodPut) - if err != nil { - return err - } - - return checkRespCode(sc, []int{http.StatusNoContent}) -} - -// InsertOrMergeEntity inserts an entity in the specified table -// or merges the existing one. -func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error { - sc, err := c.execTable(table, entity, true, "MERGE") - if err != nil { - return err - } - - return checkRespCode(sc, []int{http.StatusNoContent}) -} - -func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error { - if err := json.NewEncoder(buf).Encode(entity); err != nil { - return err - } - - dec := make(map[string]interface{}) - if err := json.NewDecoder(buf).Decode(&dec); err != nil { - return err - } - - // Inject PartitionKey and RowKey - dec[partitionKeyNode] = entity.PartitionKey() - dec[rowKeyNode] = entity.RowKey() - - // Remove tagged fields - // The tag is defined in the const section - // This is useful to avoid storing the PartitionKey and RowKey twice. 
- numFields := reflect.ValueOf(entity).Elem().NumField() - for i := 0; i < numFields; i++ { - f := reflect.ValueOf(entity).Elem().Type().Field(i) - - if f.Tag.Get(tag) == tagIgnore { - // we must look for its JSON name in the dictionary - // as the user can rename it using a tag - jsonName := f.Name - if f.Tag.Get("json") != "" { - jsonName = f.Tag.Get("json") - } - delete(dec, jsonName) - } - } - - buf.Reset() - - if err := json.NewEncoder(buf).Encode(&dec); err != nil { - return err - } - - return nil -} - -func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) { - buf := new(bytes.Buffer) - - var ret getTableEntriesResponse - if err := json.NewDecoder(reader).Decode(&ret); err != nil { - return nil, err - } - - tEntries := make([]TableEntity, len(ret.Elements)) - - for i, entry := range ret.Elements { - - buf.Reset() - if err := json.NewEncoder(buf).Encode(entry); err != nil { - return nil, err - } - - dec := make(map[string]interface{}) - if err := json.NewDecoder(buf).Decode(&dec); err != nil { - return nil, err - } - - var pKey, rKey string - // strip pk and rk - for key, val := range dec { - switch key { - case partitionKeyNode: - pKey = val.(string) - case rowKeyNode: - rKey = val.(string) - } - } - - delete(dec, partitionKeyNode) - delete(dec, rowKeyNode) - - buf.Reset() - if err := json.NewEncoder(buf).Encode(dec); err != nil { - return nil, err - } - - // Create a empty retType instance - tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity) - // Popolate it with the values - if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil { - return nil, err - } - - // Reset PartitionKey and RowKey - if err := tEntries[i].SetPartitionKey(pKey); err != nil { - return nil, err - } - if err := tEntries[i].SetRowKey(rKey); err != nil { - return nil, err - } - } - - return tEntries, nil -} - -func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken { - ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)} - - if ct.NextPartitionKey != "" && ct.NextRowKey != "" { - return &ct - } - return nil -} diff --git a/vendor/github.com/Azure/azure-storage-go/tableserviceclient.go b/vendor/github.com/Azure/azure-storage-go/tableserviceclient.go deleted file mode 100644 index ee5e0a8671b7..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/tableserviceclient.go +++ /dev/null @@ -1,20 +0,0 @@ -package storage - -// TableServiceClient contains operations for Microsoft Azure Table Storage -// Service. -type TableServiceClient struct { - client Client - auth authentication -} - -// GetServiceProperties gets the properties of your storage account's table service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties -func (c *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return c.client.getServiceProperties(tableServiceName, c.auth) -} - -// SetServiceProperties sets the properties of your storage account's table service. 
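The ServiceProperties, Logging and RetentionPolicy structs deleted earlier in this diff feed the table client's SetServiceProperties below. A sketch that enables analytics logging with a retention policy, assuming the *TableServiceClient comes from the storage Client:

package example

import storage "github.com/Azure/azure-storage-go"

// enableTableLogging turns on analytics logging for the table service with a
// 7-day retention policy. Field names mirror the structs removed in this diff.
func enableTableLogging(ts *storage.TableServiceClient) error {
	days := 7
	props := storage.ServiceProperties{
		Logging: &storage.Logging{
			Version: "1.0",
			Read:    true,
			Write:   true,
			Delete:  true,
			RetentionPolicy: &storage.RetentionPolicy{Enabled: true, Days: &days},
		},
	}
	return ts.SetServiceProperties(props)
}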
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties -func (c *TableServiceClient) SetServiceProperties(props ServiceProperties) error { - return c.client.setServiceProperties(props, tableServiceName, c.auth) -} diff --git a/vendor/github.com/Azure/azure-storage-go/util.go b/vendor/github.com/Azure/azure-storage-go/util.go deleted file mode 100644 index 57ca1b6d937e..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/util.go +++ /dev/null @@ -1,85 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "time" -) - -func (c Client) computeHmac256(message string) string { - h := hmac.New(sha256.New, c.accountKey) - h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func currentTimeRfc1123Formatted() string { - return timeRfc1123Formatted(time.Now().UTC()) -} - -func timeRfc1123Formatted(t time.Time) string { - return t.Format(http.TimeFormat) -} - -func mergeParams(v1, v2 url.Values) url.Values { - out := url.Values{} - for k, v := range v1 { - out[k] = v - } - for k, v := range v2 { - vals, ok := out[k] - if ok { - vals = append(vals, v...) - out[k] = vals - } else { - out[k] = v - } - } - return out -} - -func prepareBlockListRequest(blocks []Block) string { - s := `` - for _, v := range blocks { - s += fmt.Sprintf("<%s>%s", v.Status, v.ID, v.Status) - } - s += `` - return s -} - -func xmlUnmarshal(body io.Reader, v interface{}) error { - data, err := ioutil.ReadAll(body) - if err != nil { - return err - } - return xml.Unmarshal(data, v) -} - -func xmlMarshal(v interface{}) (io.Reader, int, error) { - b, err := xml.Marshal(v) - if err != nil { - return nil, 0, err - } - return bytes.NewReader(b), len(b), nil -} - -func headersFromStruct(v interface{}) map[string]string { - headers := make(map[string]string) - value := reflect.ValueOf(v) - for i := 0; i < value.NumField(); i++ { - key := value.Type().Field(i).Tag.Get("header") - val := value.Field(i).String() - if key != "" && val != "" { - headers[key] = val - } - } - return headers -} diff --git a/vendor/github.com/Azure/azure-storage-go/version.go b/vendor/github.com/Azure/azure-storage-go/version.go deleted file mode 100644 index c25fe3371344..000000000000 --- a/vendor/github.com/Azure/azure-storage-go/version.go +++ /dev/null @@ -1,5 +0,0 @@ -package storage - -var ( - sdkVersion = "0.1.0" -) diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go deleted file mode 100644 index 51f1c4bbcac2..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. 
A typical pattern is: - - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. For -example, the following: - - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - -will set the URL to: - - https://microsoft.com/a/b/c - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., ByUnmarshallingJson) is likely incorrect. - -Lastly, the Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure -correct parsing and formatting. - -Errors raised by autorest objects and methods will conform to the autorest.Error interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. -*/ -package autorest - -import ( - "net/http" - "time" -) - -const ( - // HeaderLocation specifies the HTTP Location header. - HeaderLocation = "Location" - - // HeaderRetryAfter specifies the HTTP Retry-After header. - HeaderRetryAfter = "Retry-After" -) - -// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set -// and false otherwise. -func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { - return containsInt(codes, resp.StatusCode) -} - -// GetLocation retrieves the URL from the Location header of the passed response. -func GetLocation(resp *http.Response) string { - return resp.Header.Get(HeaderLocation) -} - -// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If -// the header is absent or is malformed, it will return the supplied default delay time.Duration. -func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { - retry := resp.Header.Get(HeaderRetryAfter) - if retry == "" { - return defaultDelay - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - return defaultDelay - } - - return d -} - -// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
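The package comment above describes the decorator pattern in general terms. A hypothetical SendDecorator written in that style, relying only on the Sender, SenderFunc and SendDecorator names as they are used elsewhere in this diff:

package example

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// withRequestLogging wraps the next Sender, logs each request and response
// status, and passes the call along. It follows the wrap-around decorator
// style described in the package comment.
func withRequestLogging(l *log.Logger) autorest.SendDecorator {
	return func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
			l.Printf("sending %s %s", r.Method, r.URL)
			resp, err := s.Do(r)
			if err == nil {
				l.Printf("%s returned %d", r.URL, resp.StatusCode)
			}
			return resp, err
		})
	}
}

A decorator like this would be passed alongside the retry decorators, for example through SendWithSender as the async poller further down does.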
-func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare(&http.Request{Cancel: cancel}, - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go deleted file mode 100644 index 6e076981f2ba..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ /dev/null @@ -1,308 +0,0 @@ -package azure - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" -) - -const ( - headerAsyncOperation = "Azure-AsyncOperation" -) - -const ( - methodDelete = "DELETE" - methodPatch = "PATCH" - methodPost = "POST" - methodPut = "PUT" - methodGet = "GET" - - operationInProgress string = "InProgress" - operationCanceled string = "Canceled" - operationFailed string = "Failed" - operationSucceeded string = "Succeeded" -) - -// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure -// long-running operation. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - if err != nil { - return resp, err - } - pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK} - if !autorest.ResponseHasStatusCode(resp, pollingCodes...) { - return resp, nil - } - - ps := pollingState{} - for err == nil { - err = updatePollingState(resp, &ps) - if err != nil { - break - } - if ps.hasTerminated() { - if !ps.hasSucceeded() { - err = ps - } - break - } - - r, err = newPollingRequest(resp, ps) - if err != nil { - return resp, err - } - - delay = autorest.GetRetryAfter(resp, delay) - resp, err = autorest.SendWithSender(s, r, - autorest.AfterDelay(delay)) - } - - return resp, err - }) - } -} - -func getAsyncOperation(resp *http.Response) string { - return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) -} - -func hasSucceeded(state string) bool { - return state == operationSucceeded -} - -func hasTerminated(state string) bool { - switch state { - case operationCanceled, operationFailed, operationSucceeded: - return true - default: - return false - } -} - -func hasFailed(state string) bool { - return state == operationFailed -} - -type provisioningTracker interface { - state() string - hasSucceeded() bool - hasTerminated() bool -} - -type operationResource struct { - // Note: - // The specification states services should return the "id" field. However some return it as - // "operationId". 
- ID string `json:"id"` - OperationID string `json:"operationId"` - Name string `json:"name"` - Status string `json:"status"` - Properties map[string]interface{} `json:"properties"` - OperationError ServiceError `json:"error"` - StartTime date.Time `json:"startTime"` - EndTime date.Time `json:"endTime"` - PercentComplete float64 `json:"percentComplete"` -} - -func (or operationResource) state() string { - return or.Status -} - -func (or operationResource) hasSucceeded() bool { - return hasSucceeded(or.state()) -} - -func (or operationResource) hasTerminated() bool { - return hasTerminated(or.state()) -} - -type provisioningProperties struct { - ProvisioningState string `json:"provisioningState"` -} - -type provisioningStatus struct { - Properties provisioningProperties `json:"properties,omitempty"` - ProvisioningError ServiceError `json:"error,omitempty"` -} - -func (ps provisioningStatus) state() string { - return ps.Properties.ProvisioningState -} - -func (ps provisioningStatus) hasSucceeded() bool { - return hasSucceeded(ps.state()) -} - -func (ps provisioningStatus) hasTerminated() bool { - return hasTerminated(ps.state()) -} - -func (ps provisioningStatus) hasProvisioningError() bool { - return ps.ProvisioningError != ServiceError{} -} - -type pollingResponseFormat string - -const ( - usesOperationResponse pollingResponseFormat = "OperationResponse" - usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus" - formatIsUnknown pollingResponseFormat = "" -) - -type pollingState struct { - responseFormat pollingResponseFormat - uri string - state string - code string - message string -} - -func (ps pollingState) hasSucceeded() bool { - return hasSucceeded(ps.state) -} - -func (ps pollingState) hasTerminated() bool { - return hasTerminated(ps.state) -} - -func (ps pollingState) hasFailed() bool { - return hasFailed(ps.state) -} - -func (ps pollingState) Error() string { - return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message) -} - -// updatePollingState maps the operation status -- retrieved from either a provisioningState -// field, the status field of an OperationResource, or inferred from the HTTP status code -- -// into a well-known states. Since the process begins from the initial request, the state -// always comes from either a the provisioningState returned or is inferred from the HTTP -// status code. Subsequent requests will read an Azure OperationResource object if the -// service initially returned the Azure-AsyncOperation header. The responseFormat field notes -// the expected response format. -func updatePollingState(resp *http.Response, ps *pollingState) error { - // Determine the response shape - // -- The first response will always be a provisioningStatus response; only the polling requests, - // depending on the header returned, may be something otherwise. 
- var pt provisioningTracker - if ps.responseFormat == usesOperationResponse { - pt = &operationResource{} - } else { - pt = &provisioningStatus{} - } - - // If this is the first request (that is, the polling response shape is unknown), determine how - // to poll and what to expect - if ps.responseFormat == formatIsUnknown { - req := resp.Request - if req == nil { - return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing") - } - - // Prefer the Azure-AsyncOperation header - ps.uri = getAsyncOperation(resp) - if ps.uri != "" { - ps.responseFormat = usesOperationResponse - } else { - ps.responseFormat = usesProvisioningStatus - } - - // Else, use the Location header - if ps.uri == "" { - ps.uri = autorest.GetLocation(resp) - } - - // Lastly, requests against an existing resource, use the last request URI - if ps.uri == "" { - m := strings.ToUpper(req.Method) - if m == methodPatch || m == methodPut || m == methodGet { - ps.uri = req.URL.String() - } - } - } - - // Read and interpret the response (saving the Body in case no polling is necessary) - b := &bytes.Buffer{} - err := autorest.Respond(resp, - autorest.ByCopying(b), - autorest.ByUnmarshallingJSON(pt), - autorest.ByClosing()) - resp.Body = ioutil.NopCloser(b) - if err != nil { - return err - } - - // Interpret the results - // -- Terminal states apply regardless - // -- Unknown states are per-service inprogress states - // -- Otherwise, infer state from HTTP status code - if pt.hasTerminated() { - ps.state = pt.state() - } else if pt.state() != "" { - ps.state = operationInProgress - } else { - switch resp.StatusCode { - case http.StatusAccepted: - ps.state = operationInProgress - - case http.StatusNoContent, http.StatusCreated, http.StatusOK: - ps.state = operationSucceeded - - default: - ps.state = operationFailed - } - } - - if ps.state == operationInProgress && ps.uri == "" { - return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL) - } - - // For failed operation, check for error code and message in - // -- Operation resource - // -- Response - // -- Otherwise, Unknown - if ps.hasFailed() { - if ps.responseFormat == usesOperationResponse { - or := pt.(*operationResource) - ps.code = or.OperationError.Code - ps.message = or.OperationError.Message - } else { - p := pt.(*provisioningStatus) - if p.hasProvisioningError() { - ps.code = p.ProvisioningError.Code - ps.message = p.ProvisioningError.Message - } else { - ps.code = "Unknown" - ps.message = "None" - } - } - } - return nil -} - -func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) { - req := resp.Request - if req == nil { - return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing") - } - - reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel}, - autorest.AsGet(), - autorest.WithBaseURL(ps.uri)) - if err != nil { - return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri) - } - - return reqPoll, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go deleted file mode 100644 index 3f4d13421aac..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Package azure provides Azure-specific 
implementations used with AutoRest. - -See the included examples for more detail. -*/ -package azure - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strconv" - - "github.com/Azure/go-autorest/autorest" -) - -const ( - // HeaderClientID is the Azure extension header to set a user-specified request ID. - HeaderClientID = "x-ms-client-request-id" - - // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID - // should be included in the response. - HeaderReturnClientID = "x-ms-return-client-request-id" - - // HeaderRequestID is the Azure extension header of the service generated request ID returned - // in the response. - HeaderRequestID = "x-ms-request-id" -) - -// ServiceError encapsulates the error response from an Azure service. -type ServiceError struct { - Code string `json:"code"` - Message string `json:"message"` - Details *[]interface{} `json:"details"` -} - -func (se ServiceError) Error() string { - if se.Details != nil { - d, err := json.Marshal(*(se.Details)) - if err != nil { - return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details) - } - return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d)) - } - return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) -} - -// RequestError describes an error response returned by Azure service. -type RequestError struct { - autorest.DetailedError - - // The error returned by the Azure service. - ServiceError *ServiceError `json:"error"` - - // The request id (from the x-ms-request-id-header) of the request. - RequestID string -} - -// Error returns a human-friendly error message from service error. -func (e RequestError) Error() string { - return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", - e.StatusCode, e.ServiceError) -} - -// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. -func IsAzureError(e error) bool { - _, ok := e.(*RequestError) - return ok -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { - if v, ok := original.(*RequestError); ok { - return *v - } - - statusCode := autorest.UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - return RequestError{ - DetailedError: autorest.DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - }, - } -} - -// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id -// header to true such that UUID accompanies the http.Response. 
-func WithReturningClientID(uuid string) autorest.PrepareDecorator { - preparer := autorest.CreatePreparer( - WithClientID(uuid), - WithReturnClientID(true)) - - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - return preparer.Prepare(r) - }) - } -} - -// WithClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). -func WithClientID(uuid string) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderClientID, uuid) -} - -// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-return-client-request-id whose boolean value indicates if the value of the -// x-ms-client-request-id header should be included in the http.Response. -func WithReturnClientID(b bool) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) -} - -// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the -// http.Request sent to the service (and returned in the http.Response) -func ExtractClientID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderClientID, resp) -} - -// ExtractRequestID extracts the Azure server generated request identifier from the -// x-ms-request-id header. -func ExtractRequestID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderRequestID, resp) -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an -// azure.RequestError by reading the response body unless the response HTTP status code -// is among the set passed. -// -// If there is a chance service may return responses other than the Azure error -// format and the response cannot be parsed into an error, a decoding error will -// be returned containing the response body. In any case, the Responder will -// return an error if the status code is not satisfied. -// -// If this Responder returns an error, the response body will be replaced with -// an in-memory reader, which needs no further closing. -func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { - return func(r autorest.Responder) autorest.Responder { - return autorest.ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { - var e RequestError - defer resp.Body.Close() - - // Copy and replace the Body in case it does not contain an error object. - // This will leave the Body available to the caller. 
- b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) - resp.Body = ioutil.NopCloser(&b) - if decodeErr != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) - } else if e.ServiceError == nil { - e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"} - } - - e.RequestID = ExtractRequestID(resp) - if e.StatusCode == nil { - e.StatusCode = resp.StatusCode - } - err = &e - } - return err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/config.go b/vendor/github.com/Azure/go-autorest/autorest/azure/config.go deleted file mode 100644 index bea30b0d67ea..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/config.go +++ /dev/null @@ -1,13 +0,0 @@ -package azure - -import ( - "net/url" -) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorizeEndpoint url.URL - TokenEndpoint url.URL - DeviceCodeEndpoint url.URL -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go deleted file mode 100644 index e1d5498a80f1..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go +++ /dev/null @@ -1,193 +0,0 @@ -package azure - -/* - This file is largely based on rjw57/oauth2device's code, with the follow differences: - * scope -> resource, and only allow a single one - * receive "Message" in the DeviceCode struct and show it to users as the prompt - * azure-xplat-cli has the following behavior that this emulates: - - does not send client_secret during the token exchange - - sends resource again in the token exchange request -*/ - -import ( - "fmt" - "net/http" - "net/url" - "time" - - "github.com/Azure/go-autorest/autorest" -) - -const ( - logPrefix = "autorest/azure/devicetoken:" -) - -var ( - // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow - ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) - - // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow - ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) - - // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow - ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) - - // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow - ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) - - // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow - ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) - - errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" - errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" - errTokenSendingFails = "Error occurred while sending request with device code for a token" - errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" -) - -// DeviceCode is the object returned by the device auth endpoint -// It contains information to instruct the user to 
complete the auth flow -type DeviceCode struct { - DeviceCode *string `json:"device_code,omitempty"` - UserCode *string `json:"user_code,omitempty"` - VerificationURL *string `json:"verification_url,omitempty"` - ExpiresIn *int64 `json:"expires_in,string,omitempty"` - Interval *int64 `json:"interval,string,omitempty"` - - Message *string `json:"message"` // Azure specific - Resource string // store the following, stored when initiating, used when exchanging - OAuthConfig OAuthConfig - ClientID string -} - -// TokenError is the object returned by the token exchange endpoint -// when something is amiss -type TokenError struct { - Error *string `json:"error,omitempty"` - ErrorCodes []int `json:"error_codes,omitempty"` - ErrorDescription *string `json:"error_description,omitempty"` - Timestamp *string `json:"timestamp,omitempty"` - TraceID *string `json:"trace_id,omitempty"` -} - -// DeviceToken is the object return by the token exchange endpoint -// It can either look like a Token or an ErrorToken, so put both here -// and check for presence of "Error" to know if we are in error state -type deviceToken struct { - Token - TokenError -} - -// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - req, _ := autorest.Prepare( - &http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()), - autorest.WithFormData(url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - }), - ) - - resp, err := autorest.SendWithSender(client, req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err) - } - - var code DeviceCode - err = autorest.Respond( - resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&code), - autorest.ByClosing()) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err) - } - - code.ClientID = clientID - code.Resource = resource - code.OAuthConfig = oauthConfig - - return &code, nil -} - -// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has: been completed, timed out, or otherwise failed -func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { - req, _ := autorest.Prepare( - &http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()), - autorest.WithFormData(url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - }), - ) - - resp, err := autorest.SendWithSender(client, req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err) - } - - var token deviceToken - err = autorest.Respond( - resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), - autorest.ByUnmarshallingJSON(&token), - autorest.ByClosing()) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err) - } - - if token.Error == nil { - return &token.Token, nil - } - - switch *token.Error { - case "authorization_pending": - return nil, ErrDeviceAuthorizationPending - case "slow_down": - return nil, 
ErrDeviceSlowDown - case "access_denied": - return nil, ErrDeviceAccessDenied - case "code_expired": - return nil, ErrDeviceCodeExpired - default: - return nil, ErrDeviceGeneric - } -} - -// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. -// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. -func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { - intervalDuration := time.Duration(*code.Interval) * time.Second - waitDuration := intervalDuration - - for { - token, err := CheckForUserCompletion(client, code) - - if err == nil { - return token, nil - } - - switch err { - case ErrDeviceSlowDown: - waitDuration += waitDuration - case ErrDeviceAuthorizationPending: - // noop - default: // everything else is "fatal" to us - return nil, err - } - - if waitDuration > (intervalDuration * 3) { - return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) - } - - time.Sleep(waitDuration) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go deleted file mode 100644 index 4701b4376419..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ /dev/null @@ -1,167 +0,0 @@ -package azure - -import ( - "fmt" - "net/url" - "strings" -) - -const ( - activeDirectoryAPIVersion = "1.0" -) - -var environments = map[string]Environment{ - "AZURECHINACLOUD": ChinaCloud, - "AZUREGERMANCLOUD": GermanCloud, - "AZUREPUBLICCLOUD": PublicCloud, - "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, -} - -// Environment represents a set of endpoints for each of Azure's Clouds. 
-type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` -} - -var ( - // PublicCloud is the default public Azure cloud environment - PublicCloud = Environment{ - Name: "AzurePublicCloud", - ManagementPortalURL: "https://manage.windowsazure.com/", - PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.windows.net/", - ResourceManagerEndpoint: "https://management.azure.com/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", - GraphEndpoint: "https://graph.windows.net/", - StorageEndpointSuffix: "core.windows.net", - SQLDatabaseDNSSuffix: "database.windows.net", - TrafficManagerDNSSuffix: "trafficmanager.net", - KeyVaultDNSSuffix: "vault.azure.net", - ServiceBusEndpointSuffix: "servicebus.azure.com", - ServiceManagementVMDNSSuffix: "cloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.azure.com", - ContainerRegistryDNSSuffix: "azurecr.io", - } - - // USGovernmentCloud is the cloud environment for the US Government - USGovernmentCloud = Environment{ - Name: "AzureUSGovernmentCloud", - ManagementPortalURL: "https://manage.windowsazure.us/", - PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", - ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.usgovcloudapi.net/", - KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.usgovcloudapi.net/", - StorageEndpointSuffix: "core.usgovcloudapi.net", - SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", - TrafficManagerDNSSuffix: "usgovtrafficmanager.net", - KeyVaultDNSSuffix: "vault.usgovcloudapi.net", - ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", - ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", - ContainerRegistryDNSSuffix: "azurecr.io", - } - - // ChinaCloud is the cloud environment operated in China - ChinaCloud = Environment{ - Name: "AzureChinaCloud", - ManagementPortalURL: "https://manage.chinacloudapi.com/", - PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", - ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: 
"https://login.chinacloudapi.cn/", - GalleryEndpoint: "https://gallery.chinacloudapi.cn/", - KeyVaultEndpoint: "https://vault.azure.cn/", - GraphEndpoint: "https://graph.chinacloudapi.cn/", - StorageEndpointSuffix: "core.chinacloudapi.cn", - SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", - TrafficManagerDNSSuffix: "trafficmanager.cn", - KeyVaultDNSSuffix: "vault.azure.cn", - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", - ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", - ContainerRegistryDNSSuffix: "azurecr.io", - } - - // GermanCloud is the cloud environment operated in Germany - GermanCloud = Environment{ - Name: "AzureGermanCloud", - ManagementPortalURL: "http://portal.microsoftazure.de/", - PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.cloudapi.de/", - ResourceManagerEndpoint: "https://management.microsoftazure.de/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", - GalleryEndpoint: "https://gallery.cloudapi.de/", - KeyVaultEndpoint: "https://vault.microsoftazure.de/", - GraphEndpoint: "https://graph.cloudapi.de/", - StorageEndpointSuffix: "core.cloudapi.de", - SQLDatabaseDNSSuffix: "database.cloudapi.de", - TrafficManagerDNSSuffix: "azuretrafficmanager.de", - KeyVaultDNSSuffix: "vault.microsoftazure.de", - ServiceBusEndpointSuffix: "servicebus.cloudapi.de", - ServiceManagementVMDNSSuffix: "azurecloudapp.de", - ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: "azurecr.io", - } -) - -// EnvironmentFromName returns an Environment based on the common name specified -func EnvironmentFromName(name string) (Environment, error) { - name = strings.ToUpper(name) - env, ok := environments[name] - if !ok { - return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) - } - return env, nil -} - -// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls -func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) { - return OAuthConfigForTenant(env.ActiveDirectoryEndpoint, tenantID) -} - -// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls for target cloud auth endpoint -func OAuthConfigForTenant(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - template := "%s/oauth2/%s?api-version=%s" - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go b/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go deleted file mode 100644 index d5cf62ddc7ba..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go +++ /dev/null @@ -1,59 +0,0 @@ -package azure - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// LoadToken restores a Token object from a 
file located at 'path'. -func LoadToken(path string) (*Token, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - } - defer file.Close() - - var token Token - - dec := json.NewDecoder(file) - if err = dec.Decode(&token); err != nil { - return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) - } - return &token, nil -} - -// SaveToken persists an oauth token at the given location on disk. -// It moves the new file into place so it can safely be used to replace an existing file -// that maybe accessed by multiple processes. -func SaveToken(path string, mode os.FileMode, token Token) error { - dir := filepath.Dir(path) - err := os.MkdirAll(dir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) - } - - newFile, err := ioutil.TempFile(dir, "token") - if err != nil { - return fmt.Errorf("failed to create the temp file to write the token: %v", err) - } - tempPath := newFile.Name() - - if err := json.NewEncoder(newFile).Encode(token); err != nil { - return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) - } - if err := newFile.Close(); err != nil { - return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) - } - - // Atomic replace to avoid multi-writer file corruptions - if err := os.Rename(tempPath, path); err != nil { - return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) - } - if err := os.Chmod(path, mode); err != nil { - return fmt.Errorf("failed to chmod the token file %s: %v", path, err) - } - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/token.go deleted file mode 100644 index cfcd030114c6..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go +++ /dev/null @@ -1,363 +0,0 @@ -package azure - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/dgrijalva/jwt-go" -) - -const ( - defaultRefresh = 5 * time.Minute - tokenBaseDate = "1970-01-01T00:00:00Z" - - // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow - OAuthGrantTypeDeviceCode = "device_code" - - // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows - OAuthGrantTypeClientCredentials = "client_credentials" - - // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows - OAuthGrantTypeRefreshToken = "refresh_token" -) - -var expirationBase time.Time - -func init() { - expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) -} - -// TokenRefreshCallback is the type representing callbacks that will be called after -// a successful token refresh -type TokenRefreshCallback func(Token) error - -// Token encapsulates the access token used to authorize Azure requests. 
-type Token struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -// Expires returns the time.Time when the Token expires. -func (t Token) Expires() time.Time { - s, err := strconv.Atoi(t.ExpiresOn) - if err != nil { - s = -3600 - } - return expirationBase.Add(time.Duration(s) * time.Second).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the AccessToken of the Token. -func (t *Token) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r) - }) - } -} - -// ServicePrincipalNoSecret represents a secret type that contains no secret -// meaning it is not valid for fetching a fresh token. This is used by Manual -type ServicePrincipalNoSecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret -// It only returns an error for the ServicePrincipalNoSecret type -func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") -} - -// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. -type ServicePrincipalTokenSecret struct { - ClientSecret string -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. -func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. -type ServicePrincipalCertificateSecret struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - hasher := sha1.New() - _, err := hasher.Write(secret.Certificate.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. 
- jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - token.Claims = jwt.MapClaims{ - "aud": spt.oauthConfig.TokenEndpoint.String(), - "iss": spt.clientID, - "sub": spt.clientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(time.Hour * 24).Unix(), - } - - signedString, err := token.SignedString(secret.PrivateKey) - return signedString, err -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. -func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. -type ServicePrincipalToken struct { - Token - - secret ServicePrincipalSecret - oauthConfig OAuthConfig - clientID string - resource string - autoRefresh bool - refreshWithin time.Duration - sender autorest.Sender - - refreshCallbacks []TokenRefreshCallback -} - -// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. -func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - spt := &ServicePrincipalToken{ - oauthConfig: oauthConfig, - secret: secret, - clientID: id, - resource: resource, - autoRefresh: true, - refreshWithin: defaultRefresh, - sender: &http.Client{}, - refreshCallbacks: callbacks, - } - return spt, nil -} - -// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token -func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalNoSecret{}, - callbacks...) - if err != nil { - return nil, err - } - - spt.Token = token - - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. -func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. 
-func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - callbacks..., - ) -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin). -func (spt *ServicePrincipalToken) EnsureFresh() error { - if spt.WillExpireIn(spt.refreshWithin) { - return spt.Refresh() - } - return nil -} - -// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization -func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { - if spt.refreshCallbacks != nil { - for _, callback := range spt.refreshCallbacks { - err := callback(spt.Token) - if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error") - } - } - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. -func (spt *ServicePrincipalToken) Refresh() error { - return spt.refreshInternal(spt.resource) -} - -// RefreshExchange refreshes the token, but for a different resource. -func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.refreshInternal(resource) -} - -func (spt *ServicePrincipalToken) refreshInternal(resource string) error { - v := url.Values{} - v.Set("client_id", spt.clientID) - v.Set("resource", resource) - - if spt.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.RefreshToken) - } else { - v.Set("grant_type", OAuthGrantTypeClientCredentials) - err := spt.secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - - req, _ := autorest.Prepare(&http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()), - autorest.WithFormData(v)) - - resp, err := autorest.SendWithSender(spt.sender, req) - if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s", - spt.clientID) - } - - var newToken Token - err = autorest.Respond(resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&newToken), - autorest.ByClosing()) - if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request", - spt.clientID) - } - - spt.Token = newToken - - err = spt.InvokeRefreshCallbacks(newToken) - if err != nil { - // its already wrapped inside InvokeRefreshCallbacks - return err - } - - return nil -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.autoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.refreshWithin = d - return -} - -// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. 
-func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) { - spt.sender = s -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken. -// -// By default, the token will automatically refresh if nearly expired (as determined by the -// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing -// tokens. -func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - if spt.autoRefresh { - err := spt.EnsureFresh() - if err != nil { - return r, autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s", - r.URL) - } - } - return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r) - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go deleted file mode 100644 index b5f94b5c3c75..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ /dev/null @@ -1,235 +0,0 @@ -package autorest - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "net/http/cookiejar" - "runtime" - "time" -) - -const ( - // DefaultPollingDelay is a reasonable delay between polling requests. - DefaultPollingDelay = 60 * time.Second - - // DefaultPollingDuration is a reasonable total polling duration. - DefaultPollingDuration = 15 * time.Minute - - // DefaultRetryAttempts is number of attempts for retry status codes (5xx). - DefaultRetryAttempts = 3 -) - -var ( - // defaultUserAgent builds a string containing the Go version, system archityecture and OS, - // and the go-autorest version. - defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - Version(), - ) - - statusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -const ( - requestFormat = `HTTP Request Begin =================================================== -%s -===================================================== HTTP Request End -` - responseFormat = `HTTP Response Begin =================================================== -%s -===================================================== HTTP Response End -` -) - -// Response serves as the base for all responses from generated clients. It provides access to the -// last http.Response. -type Response struct { - *http.Response `json:"-"` -} - -// LoggingInspector implements request and response inspectors that log the full request and -// response to a supplied log. -type LoggingInspector struct { - Logger *log.Logger -} - -// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. 
-func (li LoggingInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - var body, b bytes.Buffer - - defer r.Body.Close() - - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) - if err := r.Write(&b); err != nil { - return nil, fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(requestFormat, b.String()) - - r.Body = ioutil.NopCloser(&body) - return p.Prepare(r) - }) - } -} - -// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - var body, b bytes.Buffer - defer resp.Body.Close() - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) - if err := resp.Write(&b); err != nil { - return fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(responseFormat, b.String()) - - resp.Body = ioutil.NopCloser(&body) - return r.Respond(resp) - }) - } -} - -// Client is the base for autorest generated clients. It provides default, "do nothing" -// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the -// standard, undecorated http.Client as a default Sender. -// -// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and -// return responses that compose with Response. -// -// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom -// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit -// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence -// sending the request by providing a decorated Sender. -type Client struct { - Authorizer Authorizer - Sender Sender - RequestInspector PrepareDecorator - ResponseInspector RespondDecorator - - // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header - PollingDelay time.Duration - - // PollingDuration sets the maximum polling time after which an error is returned. - PollingDuration time.Duration - - // RetryAttempts sets the default number of retry attempts for client. - RetryAttempts int - - // RetryDuration sets the delay duration for retries. - RetryDuration time.Duration - - // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent - // through the Do method. - UserAgent string - - Jar http.CookieJar -} - -// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed -// string. 
-func NewClientWithUserAgent(ua string) Client { - c := Client{ - PollingDelay: DefaultPollingDelay, - PollingDuration: DefaultPollingDuration, - RetryAttempts: DefaultRetryAttempts, - RetryDuration: 30 * time.Second, - UserAgent: defaultUserAgent, - } - c.AddToUserAgent(ua) - return c -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) -} - -// Do implements the Sender interface by invoking the active Sender after applying authorization. -// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent -// is set, apply set the User-Agent header. -func (c Client) Do(r *http.Request) (*http.Response, error) { - if r.UserAgent() == "" { - r, _ = Prepare(r, - WithUserAgent(c.UserAgent)) - } - r, err := Prepare(r, - c.WithInspection(), - c.WithAuthorization()) - if err != nil { - return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") - } - resp, err := SendWithSender(c.sender(), r, - DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...)) - Respond(resp, - c.ByInspecting()) - return resp, err -} - -// sender returns the Sender to which to send requests. -func (c Client) sender() Sender { - if c.Sender == nil { - j, _ := cookiejar.New(nil) - return &http.Client{Jar: j} - } - return c.Sender -} - -// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator -// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. -func (c Client) WithAuthorization() PrepareDecorator { - return c.authorizer().WithAuthorization() -} - -// authorizer returns the Authorizer to use. -func (c Client) authorizer() Authorizer { - if c.Authorizer == nil { - return NullAuthorizer{} - } - return c.Authorizer -} - -// WithInspection is a convenience method that passes the request to the supplied RequestInspector, -// if present, or returns the WithNothing PrepareDecorator otherwise. -func (c Client) WithInspection() PrepareDecorator { - if c.RequestInspector == nil { - return WithNothing() - } - return c.RequestInspector -} - -// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, -// if present, or returns the ByIgnoring RespondDecorator otherwise. -func (c Client) ByInspecting() RespondDecorator { - if c.ResponseInspector == nil { - return ByIgnoring() - } - return c.ResponseInspector -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go deleted file mode 100644 index 80ca60e9b08a..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) -defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of -time.Time types. And both convert to time.Time through a ToTime method. -*/ -package date - -import ( - "fmt" - "time" -) - -const ( - fullDate = "2006-01-02" - fullDateJSON = `"2006-01-02"` - dateFormat = "%04d-%02d-%02d" - jsonFormat = `"%04d-%02d-%02d"` -) - -// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., -// 2006-01-02). 
-type Date struct { - time.Time -} - -// ParseDate create a new Date from the passed string. -func ParseDate(date string) (d Date, err error) { - return parseDate(date, fullDate) -} - -func parseDate(date string, format string) (Date, error) { - d, err := time.Parse(format, date) - return Date{Time: d}, err -} - -// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalBinary() ([]byte, error) { - return d.MarshalText() -} - -// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalBinary(data []byte) error { - return d.UnmarshalText(data) -} - -// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalJSON() (json []byte, err error) { - return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalJSON(data []byte) (err error) { - d.Time, err = time.Parse(fullDateJSON, string(data)) - return err -} - -// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalText() (text []byte, err error) { - return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalText(data []byte) (err error) { - d.Time, err = time.Parse(fullDate, string(data)) - return err -} - -// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). -func (d Date) String() string { - return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) -} - -// ToTime returns a Date as a time.Time -func (d Date) ToTime() time.Time { - return d.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go deleted file mode 100644 index c1af6296348d..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ /dev/null @@ -1,89 +0,0 @@ -package date - -import ( - "regexp" - "time" -) - -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -const ( - azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` - azureUtcFormat = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` - rfc3339 = time.RFC3339Nano - tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` -) - -// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -type Time struct { - time.Time -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalBinary() ([]byte, error) { - return t.Time.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). 
-func (t Time) MarshalJSON() (json []byte, err error) { - return t.Time.MarshalJSON() -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalJSON(data []byte) (err error) { - timeFormat := azureUtcFormatJSON - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339JSON - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalText() (text []byte, err error) { - return t.Time.MarshalText() -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalText(data []byte) (err error) { - timeFormat := azureUtcFormat - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339 - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// String returns the Time formatted as an RFC3339 date-time string (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) String() string { - // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} - -// ToTime returns a Time as a time.Time -func (t Time) ToTime() time.Time { - return t.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go deleted file mode 100644 index 11995fb9f2c5..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ /dev/null @@ -1,86 +0,0 @@ -package date - -import ( - "errors" - "time" -) - -const ( - rfc1123JSON = `"` + time.RFC1123 + `"` - rfc1123 = time.RFC1123 -) - -// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -type TimeRFC1123 struct { - time.Time -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123JSON, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalJSON() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") - } - b := []byte(t.Format(rfc1123JSON)) - return b, nil -} - -// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalText() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") - } - - b := []byte(t.Format(rfc1123)) - return b, nil -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). 
-func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalBinary() ([]byte, error) { - return t.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// ToTime returns a Time as a time.Time -func (t TimeRFC1123) ToTime() time.Time { - return t.Time -} - -// String returns the Time formatted as an RFC1123 date-time string (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) String() string { - // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go deleted file mode 100644 index 207b1a240a3a..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ /dev/null @@ -1,11 +0,0 @@ -package date - -import ( - "strings" - "time" -) - -// ParseTime to parse Time string to specified format. -func ParseTime(format string, t string) (d time.Time, err error) { - return time.Parse(format, strings.ToUpper(t)) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go deleted file mode 100644 index 4bcb8f27b210..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ /dev/null @@ -1,80 +0,0 @@ -package autorest - -import ( - "fmt" - "net/http" -) - -const ( - // UndefinedStatusCode is used when HTTP status code is not available for an error. - UndefinedStatusCode = 0 -) - -// DetailedError encloses a error with details of the package, method, and associated HTTP -// status code (if any). -type DetailedError struct { - Original error - - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced the the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. - StatusCode interface{} - - // Message is the error message. - Message string - - // Service Error is the response body of failed API in bytes - ServiceError []byte -} - -// NewError creates a new Error conforming object from the passed packageType, method, and -// message. message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, nil, message, args...) -} - -// NewErrorWithResponse creates a new Error conforming object from the passed -// packageType, method, statusCode of the given resp (UndefinedStatusCode if -// resp is nil), and message. message is treated as a format string to which the -// optional args apply. 
-func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, resp, message, args...) -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - if v, ok := original.(DetailedError); ok { - return v - } - - statusCode := UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - - return DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - } -} - -// Error returns a formatted containing all available details (i.e., PackageType, Method, -// StatusCode, Message, and original error (if any)). -func (e DetailedError) Error() string { - if e.Original == nil { - return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) - } - return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go deleted file mode 100644 index c9deb261a1e1..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ /dev/null @@ -1,443 +0,0 @@ -package autorest - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/url" - "strings" -) - -const ( - mimeTypeJSON = "application/json" - mimeTypeFormPost = "application/x-www-form-urlencoded" - - headerAuthorization = "Authorization" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" -) - -// Preparer is the interface that wraps the Prepare method. -// -// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations -// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. -type Preparer interface { - Prepare(*http.Request) (*http.Request, error) -} - -// PreparerFunc is a method that implements the Preparer interface. -type PreparerFunc func(*http.Request) (*http.Request, error) - -// Prepare implements the Preparer interface on PreparerFunc. -func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { - return pf(r) -} - -// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then affect the result. -type PrepareDecorator func(Preparer) Preparer - -// CreatePreparer creates, decorates, and returns a Preparer. -// Without decorators, the returned Preparer returns the passed http.Request unmodified. -// Preparers are safe to share and re-use. -func CreatePreparer(decorators ...PrepareDecorator) Preparer { - return DecoratePreparer( - Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), - decorators...) -} - -// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it -// applies to the Preparer. 
Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (change the http.Request and then pass it -// along) or a post-decorator (pass the http.Request along and alter it on return). -func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { - for _, decorate := range decorators { - p = decorate(p) - } - return p -} - -// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. -// It creates a Preparer from the decorators which it then applies to the passed http.Request. -func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { - if r == nil { - return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") - } - return CreatePreparer(decorators...).Prepare(r) -} - -// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed -// http.Request. -func WithNothing() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return p.Prepare(r) - }) - } -} - -// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to -// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before -// adding the header. -func WithHeader(header string, value string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(header), value) - } - return r, err - }) - } -} - -// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the supplied token. -func WithBearerAuthorization(token string) PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) -} - -// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value -// is the passed contentType. -func AsContentType(contentType string) PrepareDecorator { - return WithHeader(headerContentType, contentType) -} - -// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the -// passed string. -func WithUserAgent(ua string) PrepareDecorator { - return WithHeader(headerUserAgent, ua) -} - -// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/x-www-form-urlencoded". -func AsFormURLEncoded() PrepareDecorator { - return AsContentType(mimeTypeFormPost) -} - -// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/json". -func AsJSON() PrepareDecorator { - return AsContentType(mimeTypeJSON) -} - -// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The -// decorator does not validate that the passed method string is a known HTTP method. -func WithMethod(method string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r.Method = method - return p.Prepare(r) - }) - } -} - -// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. -func AsDelete() PrepareDecorator { return WithMethod("DELETE") } - -// AsGet returns a PrepareDecorator that sets the HTTP method to GET. 
-func AsGet() PrepareDecorator { return WithMethod("GET") } - -// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. -func AsHead() PrepareDecorator { return WithMethod("HEAD") } - -// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. -func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } - -// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. -func AsPatch() PrepareDecorator { return WithMethod("PATCH") } - -// AsPost returns a PrepareDecorator that sets the HTTP method to POST. -func AsPost() PrepareDecorator { return WithMethod("POST") } - -// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. -func AsPut() PrepareDecorator { return WithMethod("PUT") } - -// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed -// from the supplied baseUrl. -func WithBaseURL(baseURL string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var u *url.URL - if u, err = url.Parse(baseURL); err != nil { - return r, err - } - if u.Scheme == "" { - err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) - } - if err == nil { - r.URL = u - } - } - return r, err - }) - } -} - -// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the -// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. -func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(urlParameters) - for key, value := range parameters { - baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) - } - return WithBaseURL(baseURL) -} - -// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the -// http.Request body. -func WithFormData(v url.Values) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - s := v.Encode() - r.ContentLength = int64(len(s)) - r.Body = ioutil.NopCloser(strings.NewReader(s)) - } - return r, err - }) - } -} - -// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters -// into the http.Request body. -func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var body bytes.Buffer - writer := multipart.NewWriter(&body) - for key, value := range formDataParameters { - if rc, ok := value.(io.ReadCloser); ok { - var fd io.Writer - if fd, err = writer.CreateFormFile(key, key); err != nil { - return r, err - } - if _, err = io.Copy(fd, rc); err != nil { - return r, err - } - } else { - if err = writer.WriteField(key, ensureValueString(value)); err != nil { - return r, err - } - } - } - if err = writer.Close(); err != nil { - return r, err - } - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) - r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - r.ContentLength = int64(body.Len()) - return r, err - } - return r, err - }) - } -} - -// WithFile returns a PrepareDecorator that sends file in request body. 
-func WithFile(f io.ReadCloser) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := ioutil.ReadAll(f) - if err != nil { - return r, err - } - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - r.ContentLength = int64(len(b)) - } - return r, err - }) - } -} - -// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request -// and sets the Content-Length header. -func WithBool(v bool) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the -// request and sets the Content-Length header. -func WithFloat32(v float32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the -// request and sets the Content-Length header. -func WithFloat64(v float64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request -// and sets the Content-Length header. -func WithInt32(v int32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request -// and sets the Content-Length header. -func WithInt64(v int64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithString returns a PrepareDecorator that encodes the passed string into the body of the request -// and sets the Content-Length header. -func WithString(v string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - r.ContentLength = int64(len(v)) - r.Body = ioutil.NopCloser(strings.NewReader(v)) - } - return r, err - }) - } -} - -// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the -// request and sets the Content-Length header. -func WithJSON(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := json.Marshal(v) - if err == nil { - r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - } - return r, err - }) - } -} - -// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path -// is absolute (that is, it begins with a "/"), it replaces the existing path. -func WithPath(path string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPath", "Invoked with a nil URL") - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The -// values will be escaped (aka URL encoded) before insertion into the path. 
-func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := escapeValueStrings(ensureValueStrings(pathParameters)) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. -func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(pathParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -func parseURL(u *url.URL, path string) (*url.URL, error) { - p := strings.TrimRight(u.String(), "/") - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return url.Parse(p + path) -} - -// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters -// given in the supplied map (i.e., key=value). -func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(queryParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") - } - v := r.URL.Query() - for key, value := range parameters { - v.Add(key, value) - } - r.URL.RawQuery = createQuery(v) - } - return r, err - }) - } -} - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. -type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. -func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go deleted file mode 100644 index 87f71e5854b5..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ /dev/null @@ -1,236 +0,0 @@ -package autorest - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Responder is the interface that wraps the Respond method. -// -// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold -// state since Responders may be shared and re-used. 
-type Responder interface { - Respond(*http.Response) error -} - -// ResponderFunc is a method that implements the Responder interface. -type ResponderFunc func(*http.Response) error - -// Respond implements the Responder interface on ResponderFunc. -func (rf ResponderFunc) Respond(r *http.Response) error { - return rf(r) -} - -// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to -// the http.Response and pass it along or, first, pass the http.Response along then react. -type RespondDecorator func(Responder) Responder - -// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned -// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share -// and re-used: It depends on the applied decorators. For example, a standard decorator that closes -// the response body is fine to share whereas a decorator that reads the body into a passed struct -// is not. -// -// To prevent memory leaks, ensure that at least one Responder closes the response body. -func CreateResponder(decorators ...RespondDecorator) Responder { - return DecorateResponder( - Responder(ResponderFunc(func(r *http.Response) error { return nil })), - decorators...) -} - -// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it -// applies to the Responder. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (react to the http.Response and then pass it -// along) or a post-decorator (pass the http.Response along and then react). -func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { - for _, decorate := range decorators { - r = decorate(r) - } - return r -} - -// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. -// It creates a Responder from the decorators it then applies to the passed http.Response. -func Respond(r *http.Response, decorators ...RespondDecorator) error { - if r == nil { - return nil - } - return CreateResponder(decorators...).Respond(r) -} - -// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined -// to the next RespondDecorator. -func ByIgnoring() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - return r.Respond(resp) - }) - } -} - -// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as -// the Body is read. -func ByCopying(b *bytes.Buffer) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - resp.Body = TeeReadCloser(resp.Body, b) - } - return err - }) - } -} - -// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which -// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed -// Responder is invoked prior to discarding the response body, the decorator may occur anywhere -// within the set. 
-func ByDiscardingBody() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - return fmt.Errorf("Error discarding the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it -// closes the response body. Since the passed Responder is invoked prior to closing the response -// body, the decorator may occur anywhere within the set. -func ByClosing() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which -// it closes the response if the passed Responder returns an error and the response body exists. -func ByClosingIfError() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil && resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingJSON(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - // Some responses might include a BOM, remove for successful unmarshalling - b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else if len(strings.Trim(string(b), " ")) > 0 { - errInner = json.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingXML(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - errInner = xml.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. On error, response body is fully read into a buffer and -// presented in the returned error, as well as in the response body. 
-func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - if resp.Body != nil { - defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - derr.ServiceError = b - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - err = derr - } - return err - }) - } -} - -// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is -// anything other than HTTP 200. -func WithErrorUnlessOK() RespondDecorator { - return WithErrorUnlessStatusCode(http.StatusOK) -} - -// ExtractHeader extracts all values of the specified header from the http.Response. It returns an -// empty string slice if the passed http.Response is nil or the header does not exist. -func ExtractHeader(header string, resp *http.Response) []string { - if resp != nil && resp.Header != nil { - return resp.Header[http.CanonicalHeaderKey(header)] - } - return nil -} - -// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It -// returns an empty string if the passed http.Response is nil or the header does not exist. -func ExtractHeaderValue(header string, resp *http.Response) string { - h := ExtractHeader(header, resp) - if len(h) > 0 { - return h[0] - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go deleted file mode 100644 index 9c0697815bba..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ /dev/null @@ -1,270 +0,0 @@ -package autorest - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "math" - "net/http" - "time" -) - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). 
-func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -// Send sends, by means of the default http.Client, the passed http.Request, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// Send is a convenience method and not recommended for production. Advanced users should use -// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). -// -// Send will not poll or retry requests. -func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(&http.Client{}, r, decorators...) -} - -// SendWithSender sends the passed http.Request, through the provided Sender, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// SendWithSender will not poll or retry requests. -func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return DecorateSender(s, decorators...).Do(r) -} - -// AfterDelay returns a SendDecorator that delays for the passed time.Duration before -// invoking the Sender. The delay may be terminated by closing the optional channel on the -// http.Request. If canceled, no further Senders are invoked. -func AfterDelay(d time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if !DelayForBackoff(d, 0, r.Cancel) { - return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") - } - return s.Do(r) - }) - } -} - -// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. -func AsIs() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return s.Do(r) - }) - } -} - -// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which -// it closes the response if the passed Sender returns an error and the response body exists. -func DoCloseIfError() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - Respond(resp, ByDiscardingBody(), ByClosing()) - } - return resp, err - }) - } -} - -// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is -// among the set passed. Since these are artificial errors, the response body may still require -// closing. -func DoErrorIfStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. 
-func DoErrorUnlessStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the -// passed status codes. It expects the http.Response to contain a Location header providing the -// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than -// the supplied duration. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - - if err == nil && ResponseHasStatusCode(resp, codes...) { - r, err = NewPollingRequest(resp, r.Cancel) - - for err == nil && ResponseHasStatusCode(resp, codes...) { - Respond(resp, - ByDiscardingBody(), - ByClosing()) - resp, err = SendWithSender(s, r, - AfterDelay(GetRetryAfter(resp, delay))) - } - } - - return resp, err - }) - } -} - -// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - for attempt := 0; attempt < attempts; attempt++ { - resp, err = s.Do(r) - if err == nil { - return resp, err - } - DelayForBackoff(backoff, attempt, r.Cancel) - } - return resp, err - }) - } -} - -// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - b := []byte{} - if r.Body != nil { - b, err = ioutil.ReadAll(r.Body) - if err != nil { - return resp, err - } - } - - // Increment to add the first call (attempts denotes number of retries) - attempts++ - for attempt := 0; attempt < attempts; attempt++ { - r.Body = ioutil.NopCloser(bytes.NewBuffer(b)) - resp, err = s.Do(r) - if err != nil || !ResponseHasStatusCode(resp, codes...) { - return resp, err - } - DelayForBackoff(backoff, attempt, r.Cancel) - } - return resp, err - }) - } -} - -// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal -// to or greater than the specified duration, exponentially backing off between requests using the -// supplied backoff time.Duration (which may be zero). 
Retrying may be canceled by closing the -// optional channel on the http.Request. -func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - end := time.Now().Add(d) - for attempt := 0; time.Now().Before(end); attempt++ { - resp, err = s.Do(r) - if err == nil { - return resp, err - } - DelayForBackoff(backoff, attempt, r.Cancel) - } - return resp, err - }) - } -} - -// WithLogging returns a SendDecorator that implements simple before and after logging of the -// request. -func WithLogging(logger *log.Logger) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - logger.Printf("Sending %s %s", r.Method, r.URL) - resp, err := s.Do(r) - if err != nil { - logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) - } else { - logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) - } - return resp, err - }) - } -} - -// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set -// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, -// returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { - select { - case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): - return true - case <-cancel: - return false - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go deleted file mode 100644 index 78067148b28d..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ /dev/null @@ -1,178 +0,0 @@ -package autorest - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net/url" - "reflect" - "sort" - "strings" -) - -// EncodedAs is a series of constants specifying various data encodings -type EncodedAs string - -const ( - // EncodedAsJSON states that data is encoded as JSON - EncodedAsJSON EncodedAs = "JSON" - - // EncodedAsXML states that data is encoded as Xml - EncodedAsXML EncodedAs = "XML" -) - -// Decoder defines the decoding method json.Decoder and xml.Decoder share -type Decoder interface { - Decode(v interface{}) error -} - -// NewDecoder creates a new decoder appropriate to the passed encoding. -// encodedAs specifies the type of encoding and r supplies the io.Reader containing the -// encoded data. -func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { - if encodedAs == EncodedAsJSON { - return json.NewDecoder(r) - } else if encodedAs == EncodedAsXML { - return xml.NewDecoder(r) - } - return nil -} - -// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy -// is especially useful if there is a chance the data will fail to decode. -// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v -// is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { - b := bytes.Buffer{} - return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) -} - -// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. 
-// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. -// Further, when it is closed, it ensures that rc is closed as well. -func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { - return &teeReadCloser{rc, io.TeeReader(rc, w)} -} - -type teeReadCloser struct { - rc io.ReadCloser - r io.Reader -} - -func (t *teeReadCloser) Read(p []byte) (int, error) { - return t.r.Read(p) -} - -func (t *teeReadCloser) Close() error { - return t.rc.Close() -} - -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -func escapeValueStrings(m map[string]string) map[string]string { - for key, value := range m { - m[key] = url.QueryEscape(value) - } - return m -} - -func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { - mapOfStrings := make(map[string]string) - for key, value := range mapOfInterface { - mapOfStrings[key] = ensureValueString(value) - } - return mapOfStrings -} - -func ensureValueString(value interface{}) string { - if value == nil { - return "" - } - switch v := value.(type) { - case string: - return v - case []byte: - return string(v) - default: - return fmt.Sprintf("%v", v) - } -} - -// MapToValues method converts map[string]interface{} to url.Values. -func MapToValues(m map[string]interface{}) url.Values { - v := url.Values{} - for key, value := range m { - x := reflect.ValueOf(value) - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - for i := 0; i < x.Len(); i++ { - v.Add(key, ensureValueString(x.Index(i))) - } - } else { - v.Add(key, ensureValueString(value)) - } - } - return v -} - -// String method converts interface v to string. If interface is a list, it -// joins list elements using separator. -func String(v interface{}, sep ...string) string { - if len(sep) > 0 { - return ensureValueString(strings.Join(v.([]string), sep[0])) - } - return ensureValueString(v) -} - -// Encode method encodes url path and query parameters. -func Encode(location string, v interface{}, sep ...string) string { - s := String(v, sep...) - switch strings.ToLower(location) { - case "path": - return pathEscape(s) - case "query": - return queryEscape(s) - default: - return s - } -} - -func pathEscape(s string) string { - return strings.Replace(url.QueryEscape(s), "+", "%20", -1) -} - -func queryEscape(s string) string { - return url.QueryEscape(s) -} - -// This method is same as Encode() method of "net/url" go package, -// except it does not encode the query parameters because they -// already come encoded. It formats values map in query format (bar=foo&a=b). 
-func createQuery(v url.Values) string { - var buf bytes.Buffer - keys := make([]string, 0, len(v)) - for k := range v { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := url.QueryEscape(k) + "=" - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(v) - } - } - return buf.String() -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go deleted file mode 100644 index 7a0bf9c9ffb8..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ /dev/null @@ -1,23 +0,0 @@ -package autorest - -import ( - "fmt" -) - -const ( - major = "7" - minor = "3" - patch = "0" - tag = "" - semVerFormat = "%s.%s.%s%s" -) - -var version string - -// Version returns the semantic version (see http://semver.org). -func Version() string { - if version == "" { - version = fmt.Sprintf(semVerFormat, major, minor, patch, tag) - } - return version -} diff --git a/vendor/github.com/codahale/hdrhistogram/LICENSE b/vendor/github.com/codahale/hdrhistogram/LICENSE new file mode 100644 index 000000000000..f9835c241fc4 --- /dev/null +++ b/vendor/github.com/codahale/hdrhistogram/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Coda Hale + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/codahale/hdrhistogram/README.md b/vendor/github.com/codahale/hdrhistogram/README.md new file mode 100644 index 000000000000..614b197c3dd3 --- /dev/null +++ b/vendor/github.com/codahale/hdrhistogram/README.md @@ -0,0 +1,15 @@ +hdrhistogram +============ + +[![Build Status](https://travis-ci.org/codahale/hdrhistogram.png?branch=master)](https://travis-ci.org/codahale/hdrhistogram) + +A pure Go implementation of the [HDR Histogram](https://github.com/HdrHistogram/HdrHistogram). + +> A Histogram that supports recording and analyzing sampled data value counts +> across a configurable integer value range with configurable value precision +> within the range. Value precision is expressed as the number of significant +> digits in the value recording, and provides control over value quantization +> behavior across the value range and the subsequent value resolution at any +> given level. + +For documentation, check [godoc](http://godoc.org/github.com/codahale/hdrhistogram). 
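For reviewers unfamiliar with this new dependency, here is a minimal usage sketch (illustrative only, not part of the patch). The function and method names are taken from the `hdr.go` file added below; the import path follows the vendored location in this tree.

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Track values between 1 and 30000000 (e.g. request latencies in
	// microseconds, up to 30s) with 3 significant figures of precision.
	h := hdrhistogram.New(1, 30000000, 3)

	// Record sampled values; RecordValue returns an error if a value falls
	// outside the trackable range.
	for _, v := range []int64{120, 350, 900, 4200, 15000} {
		if err := h.RecordValue(v); err != nil {
			fmt.Println("dropped sample:", err)
		}
	}

	// Query the recorded distribution.
	fmt.Println("p50:", h.ValueAtQuantile(50))
	fmt.Println("p99:", h.ValueAtQuantile(99))
	fmt.Println("max:", h.Max(), "mean:", h.Mean())
}
```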
diff --git a/vendor/github.com/codahale/hdrhistogram/hdr.go b/vendor/github.com/codahale/hdrhistogram/hdr.go new file mode 100644 index 000000000000..c97842926d67 --- /dev/null +++ b/vendor/github.com/codahale/hdrhistogram/hdr.go @@ -0,0 +1,564 @@ +// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram +// data structure. The HDR Histogram allows for fast and accurate analysis of +// the extreme ranges of data with non-normal distributions, like latency. +package hdrhistogram + +import ( + "fmt" + "math" +) + +// A Bracket is a part of a cumulative distribution. +type Bracket struct { + Quantile float64 + Count, ValueAt int64 +} + +// A Snapshot is an exported view of a Histogram, useful for serializing them. +// A Histogram can be constructed from it by passing it to Import. +type Snapshot struct { + LowestTrackableValue int64 + HighestTrackableValue int64 + SignificantFigures int64 + Counts []int64 +} + +// A Histogram is a lossy data structure used to record the distribution of +// non-normally distributed data (like latency) with a high degree of accuracy +// and a bounded degree of precision. +type Histogram struct { + lowestTrackableValue int64 + highestTrackableValue int64 + unitMagnitude int64 + significantFigures int64 + subBucketHalfCountMagnitude int32 + subBucketHalfCount int32 + subBucketMask int64 + subBucketCount int32 + bucketCount int32 + countsLen int32 + totalCount int64 + counts []int64 +} + +// New returns a new Histogram instance capable of tracking values in the given +// range and with the given amount of precision. +func New(minValue, maxValue int64, sigfigs int) *Histogram { + if sigfigs < 1 || 5 < sigfigs { + panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs)) + } + + largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs) + subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution)))) + + subBucketHalfCountMagnitude := subBucketCountMagnitude + if subBucketHalfCountMagnitude < 1 { + subBucketHalfCountMagnitude = 1 + } + subBucketHalfCountMagnitude-- + + unitMagnitude := int32(math.Floor(math.Log2(float64(minValue)))) + if unitMagnitude < 0 { + unitMagnitude = 0 + } + + subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1)) + + subBucketHalfCount := subBucketCount / 2 + subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude) + + // determine exponent range needed to support the trackable value with no + // overflow: + smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude) + bucketsNeeded := int32(1) + for smallestUntrackableValue < maxValue { + smallestUntrackableValue <<= 1 + bucketsNeeded++ + } + + bucketCount := bucketsNeeded + countsLen := (bucketCount + 1) * (subBucketCount / 2) + + return &Histogram{ + lowestTrackableValue: minValue, + highestTrackableValue: maxValue, + unitMagnitude: int64(unitMagnitude), + significantFigures: int64(sigfigs), + subBucketHalfCountMagnitude: subBucketHalfCountMagnitude, + subBucketHalfCount: subBucketHalfCount, + subBucketMask: subBucketMask, + subBucketCount: subBucketCount, + bucketCount: bucketCount, + countsLen: countsLen, + totalCount: 0, + counts: make([]int64, countsLen), + } +} + +// ByteSize returns an estimate of the amount of memory allocated to the +// histogram in bytes. +// +// N.B.: This does not take into account the overhead for slices, which are +// small, constant, and specific to the compiler version. 
+func (h *Histogram) ByteSize() int { + return 6*8 + 5*4 + len(h.counts)*8 +} + +// Merge merges the data stored in the given histogram with the receiver, +// returning the number of recorded values which had to be dropped. +func (h *Histogram) Merge(from *Histogram) (dropped int64) { + i := from.rIterator() + for i.next() { + v := i.valueFromIdx + c := i.countAtIdx + + if h.RecordValues(v, c) != nil { + dropped += c + } + } + + return +} + +// TotalCount returns total number of values recorded. +func (h *Histogram) TotalCount() int64 { + return h.totalCount +} + +// Max returns the approximate maximum recorded value. +func (h *Histogram) Max() int64 { + var max int64 + i := h.iterator() + for i.next() { + if i.countAtIdx != 0 { + max = i.highestEquivalentValue + } + } + return h.highestEquivalentValue(max) +} + +// Min returns the approximate minimum recorded value. +func (h *Histogram) Min() int64 { + var min int64 + i := h.iterator() + for i.next() { + if i.countAtIdx != 0 && min == 0 { + min = i.highestEquivalentValue + break + } + } + return h.lowestEquivalentValue(min) +} + +// Mean returns the approximate arithmetic mean of the recorded values. +func (h *Histogram) Mean() float64 { + if h.totalCount == 0 { + return 0 + } + var total int64 + i := h.iterator() + for i.next() { + if i.countAtIdx != 0 { + total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx) + } + } + return float64(total) / float64(h.totalCount) +} + +// StdDev returns the approximate standard deviation of the recorded values. +func (h *Histogram) StdDev() float64 { + if h.totalCount == 0 { + return 0 + } + + mean := h.Mean() + geometricDevTotal := 0.0 + + i := h.iterator() + for i.next() { + if i.countAtIdx != 0 { + dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean + geometricDevTotal += (dev * dev) * float64(i.countAtIdx) + } + } + + return math.Sqrt(geometricDevTotal / float64(h.totalCount)) +} + +// Reset deletes all recorded values and restores the histogram to its original +// state. +func (h *Histogram) Reset() { + h.totalCount = 0 + for i := range h.counts { + h.counts[i] = 0 + } +} + +// RecordValue records the given value, returning an error if the value is out +// of range. +func (h *Histogram) RecordValue(v int64) error { + return h.RecordValues(v, 1) +} + +// RecordCorrectedValue records the given value, correcting for stalls in the +// recording process. This only works for processes which are recording values +// at an expected interval (e.g., doing jitter analysis). Processes which are +// recording ad-hoc values (e.g., latency for incoming requests) can't take +// advantage of this. +func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error { + if err := h.RecordValue(v); err != nil { + return err + } + + if expectedInterval <= 0 || v <= expectedInterval { + return nil + } + + missingValue := v - expectedInterval + for missingValue >= expectedInterval { + if err := h.RecordValue(missingValue); err != nil { + return err + } + missingValue -= expectedInterval + } + + return nil +} + +// RecordValues records n occurrences of the given value, returning an error if +// the value is out of range. +func (h *Histogram) RecordValues(v, n int64) error { + idx := h.countsIndexFor(v) + if idx < 0 || int(h.countsLen) <= idx { + return fmt.Errorf("value %d is too large to be recorded", v) + } + h.counts[idx] += n + h.totalCount += n + + return nil +} + +// ValueAtQuantile returns the recorded value at the given quantile (0..100). 
+func (h *Histogram) ValueAtQuantile(q float64) int64 { + if q > 100 { + q = 100 + } + + total := int64(0) + countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5) + + i := h.iterator() + for i.next() { + total += i.countAtIdx + if total >= countAtPercentile { + return h.highestEquivalentValue(i.valueFromIdx) + } + } + + return 0 +} + +// CumulativeDistribution returns an ordered list of brackets of the +// distribution of recorded values. +func (h *Histogram) CumulativeDistribution() []Bracket { + var result []Bracket + + i := h.pIterator(1) + for i.next() { + result = append(result, Bracket{ + Quantile: i.percentile, + Count: i.countToIdx, + ValueAt: i.highestEquivalentValue, + }) + } + + return result +} + +// SignificantFigures returns the significant figures used to create the +// histogram +func (h *Histogram) SignificantFigures() int64 { + return h.significantFigures +} + +// LowestTrackableValue returns the lower bound on values that will be added +// to the histogram +func (h *Histogram) LowestTrackableValue() int64 { + return h.lowestTrackableValue +} + +// HighestTrackableValue returns the upper bound on values that will be added +// to the histogram +func (h *Histogram) HighestTrackableValue() int64 { + return h.highestTrackableValue +} + +// Histogram bar for plotting +type Bar struct { + From, To, Count int64 +} + +// Pretty print as csv for easy plotting +func (b Bar) String() string { + return fmt.Sprintf("%v, %v, %v\n", b.From, b.To, b.Count) +} + +// Distribution returns an ordered list of bars of the +// distribution of recorded values, counts can be normalized to a probability +func (h *Histogram) Distribution() (result []Bar) { + i := h.iterator() + for i.next() { + result = append(result, Bar{ + Count: i.countAtIdx, + From: h.lowestEquivalentValue(i.valueFromIdx), + To: i.highestEquivalentValue, + }) + } + + return result +} + +// Equals returns true if the two Histograms are equivalent, false if not. +func (h *Histogram) Equals(other *Histogram) bool { + switch { + case + h.lowestTrackableValue != other.lowestTrackableValue, + h.highestTrackableValue != other.highestTrackableValue, + h.unitMagnitude != other.unitMagnitude, + h.significantFigures != other.significantFigures, + h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude, + h.subBucketHalfCount != other.subBucketHalfCount, + h.subBucketMask != other.subBucketMask, + h.subBucketCount != other.subBucketCount, + h.bucketCount != other.bucketCount, + h.countsLen != other.countsLen, + h.totalCount != other.totalCount: + return false + default: + for i, c := range h.counts { + if c != other.counts[i] { + return false + } + } + } + return true +} + +// Export returns a snapshot view of the Histogram. This can be later passed to +// Import to construct a new Histogram with the same state. +func (h *Histogram) Export() *Snapshot { + return &Snapshot{ + LowestTrackableValue: h.lowestTrackableValue, + HighestTrackableValue: h.highestTrackableValue, + SignificantFigures: h.significantFigures, + Counts: append([]int64(nil), h.counts...), // copy + } +} + +// Import returns a new Histogram populated from the Snapshot data (which the +// caller must stop accessing). 
+func Import(s *Snapshot) *Histogram { + h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures)) + h.counts = s.Counts + totalCount := int64(0) + for i := int32(0); i < h.countsLen; i++ { + countAtIndex := h.counts[i] + if countAtIndex > 0 { + totalCount += countAtIndex + } + } + h.totalCount = totalCount + return h +} + +func (h *Histogram) iterator() *iterator { + return &iterator{ + h: h, + subBucketIdx: -1, + } +} + +func (h *Histogram) rIterator() *rIterator { + return &rIterator{ + iterator: iterator{ + h: h, + subBucketIdx: -1, + }, + } +} + +func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator { + return &pIterator{ + iterator: iterator{ + h: h, + subBucketIdx: -1, + }, + ticksPerHalfDistance: ticksPerHalfDistance, + } +} + +func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 { + bucketIdx := h.getBucketIndex(v) + subBucketIdx := h.getSubBucketIdx(v, bucketIdx) + adjustedBucket := bucketIdx + if subBucketIdx >= h.subBucketCount { + adjustedBucket++ + } + return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket)) +} + +func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 { + return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude) +} + +func (h *Histogram) lowestEquivalentValue(v int64) int64 { + bucketIdx := h.getBucketIndex(v) + subBucketIdx := h.getSubBucketIdx(v, bucketIdx) + return h.valueFromIndex(bucketIdx, subBucketIdx) +} + +func (h *Histogram) nextNonEquivalentValue(v int64) int64 { + return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v) +} + +func (h *Histogram) highestEquivalentValue(v int64) int64 { + return h.nextNonEquivalentValue(v) - 1 +} + +func (h *Histogram) medianEquivalentValue(v int64) int64 { + return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1) +} + +func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 { + return h.counts[h.countsIndex(bucketIdx, subBucketIdx)] +} + +func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 { + bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude) + offsetInBucket := subBucketIdx - h.subBucketHalfCount + return bucketBaseIdx + offsetInBucket +} + +func (h *Histogram) getBucketIndex(v int64) int32 { + pow2Ceiling := bitLen(v | h.subBucketMask) + return int32(pow2Ceiling - int64(h.unitMagnitude) - + int64(h.subBucketHalfCountMagnitude+1)) +} + +func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 { + return int32(v >> uint(int64(idx)+int64(h.unitMagnitude))) +} + +func (h *Histogram) countsIndexFor(v int64) int { + bucketIdx := h.getBucketIndex(v) + subBucketIdx := h.getSubBucketIdx(v, bucketIdx) + return int(h.countsIndex(bucketIdx, subBucketIdx)) +} + +type iterator struct { + h *Histogram + bucketIdx, subBucketIdx int32 + countAtIdx, countToIdx, valueFromIdx int64 + highestEquivalentValue int64 +} + +func (i *iterator) next() bool { + if i.countToIdx >= i.h.totalCount { + return false + } + + // increment bucket + i.subBucketIdx++ + if i.subBucketIdx >= i.h.subBucketCount { + i.subBucketIdx = i.h.subBucketHalfCount + i.bucketIdx++ + } + + if i.bucketIdx >= i.h.bucketCount { + return false + } + + i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx) + i.countToIdx += i.countAtIdx + i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx) + i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx) + + return true +} + +type rIterator struct { + iterator + countAddedThisStep int64 +} + +func (r *rIterator) next() 
bool { + for r.iterator.next() { + if r.countAtIdx != 0 { + r.countAddedThisStep = r.countAtIdx + return true + } + } + return false +} + +type pIterator struct { + iterator + seenLastValue bool + ticksPerHalfDistance int32 + percentileToIteratorTo float64 + percentile float64 +} + +func (p *pIterator) next() bool { + if !(p.countToIdx < p.h.totalCount) { + if p.seenLastValue { + return false + } + + p.seenLastValue = true + p.percentile = 100 + + return true + } + + if p.subBucketIdx == -1 && !p.iterator.next() { + return false + } + + var done = false + for !done { + currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount) + if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile { + p.percentile = p.percentileToIteratorTo + halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1)) + percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance + p.percentileToIteratorTo += 100.0 / percentileReportingTicks + return true + } + done = !p.iterator.next() + } + + return true +} + +func bitLen(x int64) (n int64) { + for ; x >= 0x8000; x >>= 16 { + n += 16 + } + if x >= 0x80 { + x >>= 8 + n += 8 + } + if x >= 0x8 { + x >>= 4 + n += 4 + } + if x >= 0x2 { + x >>= 2 + n += 2 + } + if x >= 0x1 { + n++ + } + return +} diff --git a/vendor/github.com/codahale/hdrhistogram/window.go b/vendor/github.com/codahale/hdrhistogram/window.go new file mode 100644 index 000000000000..dc43612a4b60 --- /dev/null +++ b/vendor/github.com/codahale/hdrhistogram/window.go @@ -0,0 +1,45 @@ +package hdrhistogram + +// A WindowedHistogram combines histograms to provide windowed statistics. +type WindowedHistogram struct { + idx int + h []Histogram + m *Histogram + + Current *Histogram +} + +// NewWindowed creates a new WindowedHistogram with N underlying histograms with +// the given parameters. +func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram { + w := WindowedHistogram{ + idx: -1, + h: make([]Histogram, n), + m: New(minValue, maxValue, sigfigs), + } + + for i := range w.h { + w.h[i] = *New(minValue, maxValue, sigfigs) + } + w.Rotate() + + return &w +} + +// Merge returns a histogram which includes the recorded values from all the +// sections of the window. +func (w *WindowedHistogram) Merge() *Histogram { + w.m.Reset() + for _, h := range w.h { + w.m.Merge(&h) + } + return w.m +} + +// Rotate resets the oldest histogram and rotates it to be used as the current +// histogram. 
+func (w *WindowedHistogram) Rotate() { + w.idx++ + w.Current = &w.h[w.idx%len(w.h)] + w.Current.Reset() +} diff --git a/vendor/gopkg.in/fatih/set.v0/LICENSE.md b/vendor/github.com/deckarep/golang-set/LICENSE similarity index 50% rename from vendor/gopkg.in/fatih/set.v0/LICENSE.md rename to vendor/github.com/deckarep/golang-set/LICENSE index 25fdaf639dfc..b5768f89cfd2 100644 --- a/vendor/gopkg.in/fatih/set.v0/LICENSE.md +++ b/vendor/github.com/deckarep/golang-set/LICENSE @@ -1,20 +1,22 @@ -The MIT License (MIT) +Open Source Initiative OSI - The MIT License (MIT):Licensing -Copyright (c) 2013 Fatih Arslan +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/deckarep/golang-set/README.md b/vendor/github.com/deckarep/golang-set/README.md new file mode 100644 index 000000000000..c3b50b2c5c63 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/README.md @@ -0,0 +1,95 @@ +[![Build Status](https://travis-ci.org/deckarep/golang-set.svg?branch=master)](https://travis-ci.org/deckarep/golang-set) +[![Go Report Card](https://goreportcard.com/badge/github.com/deckarep/golang-set)](https://goreportcard.com/report/github.com/deckarep/golang-set) +[![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.svg)](http://godoc.org/github.com/deckarep/golang-set) + +## golang-set + + +The missing set collection for the Go language. Until Go has sets built-in...use this. + +Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python. +You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. 
To those I say simply ignore this repository +and carry-on and to the rest that find this useful please contribute in helping me make it better by: + +* Helping to make more idiomatic improvements to the code. +* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~ +* Helping to make the unit-tests more robust and kick-ass. +* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set) +* Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.) + +I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang) + +*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types. + +## Features (as of 9/22/2014) + +* a CartesianProduct() method has been added with unit-tests: [Read more about the cartesian product](http://en.wikipedia.org/wiki/Cartesian_product) + +## Features (as of 9/15/2014) + +* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set) + +## Features (as of 4/22/2014) + +* One common interface to both implementations +* Two set implementations to choose from + * a thread-safe implementation designed for concurrent use + * a non-thread-safe implementation designed for performance +* 75 benchmarks for both implementations +* 35 unit tests for both implementations +* 14 concurrent tests for the thread-safe implementation + + + +Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind +however that the Python set is a built-in type and supports additional features and syntax that make it awesome. + +## Examples but not exhaustive: + +```go +requiredClasses := mapset.NewSet() +requiredClasses.Add("Cooking") +requiredClasses.Add("English") +requiredClasses.Add("Math") +requiredClasses.Add("Biology") + +scienceSlice := []interface{}{"Biology", "Chemistry"} +scienceClasses := mapset.NewSetFromSlice(scienceSlice) + +electiveClasses := mapset.NewSet() +electiveClasses.Add("Welding") +electiveClasses.Add("Music") +electiveClasses.Add("Automotive") + +bonusClasses := mapset.NewSet() +bonusClasses.Add("Go Programming") +bonusClasses.Add("Python Programming") + +//Show me all the available classes I can take +allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses) +fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming} + + +//Is cooking considered a science class? +fmt.Println(scienceClasses.Contains("Cooking")) //false + +//Show me all classes that are not science classes, since I hate science. +fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding} + +//Which science classes are also required classes? +fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology} + +//How many bonus classes do you offer? 
+fmt.Println(bonusClasses.Cardinality()) //2 + +//Do you have the following classes? Welding, Automotive and English? +fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true +``` + +Thanks! + +-Ralph + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge") + +[![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon) diff --git a/vendor/github.com/deckarep/golang-set/iterator.go b/vendor/github.com/deckarep/golang-set/iterator.go new file mode 100644 index 000000000000..9dfecade4255 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/iterator.go @@ -0,0 +1,58 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +// Iterator defines an iterator over a Set, its C channel can be used to range over the Set's +// elements. +type Iterator struct { + C <-chan interface{} + stop chan struct{} +} + +// Stop stops the Iterator, no further elements will be received on C, C will be closed. +func (i *Iterator) Stop() { + // Allows for Stop() to be called multiple times + // (close() panics when called on already closed channel) + defer func() { + recover() + }() + + close(i.stop) + + // Exhaust any remaining elements. + for range i.C { + } +} + +// newIterator returns a new Iterator instance together with its item and stop channels. 
+func newIterator() (*Iterator, chan<- interface{}, <-chan struct{}) { + itemChan := make(chan interface{}) + stopChan := make(chan struct{}) + return &Iterator{ + C: itemChan, + stop: stopChan, + }, itemChan, stopChan +} diff --git a/vendor/github.com/deckarep/golang-set/set.go b/vendor/github.com/deckarep/golang-set/set.go new file mode 100644 index 000000000000..29eb2e5a2247 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/set.go @@ -0,0 +1,217 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Package mapset implements a simple and generic set collection. +// Items stored within it are unordered and unique. It supports +// typical set operations: membership testing, intersection, union, +// difference, symmetric difference and cloning. +// +// Package mapset provides two implementations of the Set +// interface. The default implementation is safe for concurrent +// access, but a non-thread-safe implementation is also provided for +// programs that can benefit from the slight speed improvement and +// that can enforce mutual exclusion through other means. +package mapset + +// Set is the primary interface provided by the mapset package. It +// represents an unordered set of data and a large number of +// operations that can be applied to that set. +type Set interface { + // Adds an element to the set. Returns whether + // the item was added. + Add(i interface{}) bool + + // Returns the number of elements in the set. + Cardinality() int + + // Removes all elements from the set, leaving + // the empty set. + Clear() + + // Returns a clone of the set using the same + // implementation, duplicating all keys. + Clone() Set + + // Returns whether the given items + // are all in the set. + Contains(i ...interface{}) bool + + // Returns the difference between this set + // and other. The returned set will contain + // all elements of this set that are not also + // elements of other. + // + // Note that the argument to Difference + // must be of the same type as the receiver + // of the method. Otherwise, Difference will + // panic. + Difference(other Set) Set + + // Determines if two sets are equal to each + // other. If they have the same cardinality + // and contain the same elements, they are + // considered equal. The order in which + // the elements were added is irrelevant. 
+ // + // Note that the argument to Equal must be + // of the same type as the receiver of the + // method. Otherwise, Equal will panic. + Equal(other Set) bool + + // Returns a new set containing only the elements + // that exist only in both sets. + // + // Note that the argument to Intersect + // must be of the same type as the receiver + // of the method. Otherwise, Intersect will + // panic. + Intersect(other Set) Set + + // Determines if every element in this set is in + // the other set but the two sets are not equal. + // + // Note that the argument to IsProperSubset + // must be of the same type as the receiver + // of the method. Otherwise, IsProperSubset + // will panic. + IsProperSubset(other Set) bool + + // Determines if every element in the other set + // is in this set but the two sets are not + // equal. + // + // Note that the argument to IsSuperset + // must be of the same type as the receiver + // of the method. Otherwise, IsSuperset will + // panic. + IsProperSuperset(other Set) bool + + // Determines if every element in this set is in + // the other set. + // + // Note that the argument to IsSubset + // must be of the same type as the receiver + // of the method. Otherwise, IsSubset will + // panic. + IsSubset(other Set) bool + + // Determines if every element in the other set + // is in this set. + // + // Note that the argument to IsSuperset + // must be of the same type as the receiver + // of the method. Otherwise, IsSuperset will + // panic. + IsSuperset(other Set) bool + + // Iterates over elements and executes the passed func against each element. + // If passed func returns true, stop iteration at the time. + Each(func(interface{}) bool) + + // Returns a channel of elements that you can + // range over. + Iter() <-chan interface{} + + // Returns an Iterator object that you can + // use to range over the set. + Iterator() *Iterator + + // Remove a single element from the set. + Remove(i interface{}) + + // Provides a convenient string representation + // of the current state of the set. + String() string + + // Returns a new set with all elements which are + // in either this set or the other set but not in both. + // + // Note that the argument to SymmetricDifference + // must be of the same type as the receiver + // of the method. Otherwise, SymmetricDifference + // will panic. + SymmetricDifference(other Set) Set + + // Returns a new set with all elements in both sets. + // + // Note that the argument to Union must be of the + + // same type as the receiver of the method. + // Otherwise, IsSuperset will panic. + Union(other Set) Set + + // Pop removes and returns an arbitrary item from the set. + Pop() interface{} + + // Returns all subsets of a given set (Power Set). + PowerSet() Set + + // Returns the Cartesian Product of two sets. + CartesianProduct(other Set) Set + + // Returns the members of the set as a slice. + ToSlice() []interface{} +} + +// NewSet creates and returns a reference to an empty set. Operations +// on the resulting set are thread-safe. +func NewSet(s ...interface{}) Set { + set := newThreadSafeSet() + for _, item := range s { + set.Add(item) + } + return &set +} + +// NewSetWith creates and returns a new set with the given elements. +// Operations on the resulting set are thread-safe. +func NewSetWith(elts ...interface{}) Set { + return NewSetFromSlice(elts) +} + +// NewSetFromSlice creates and returns a reference to a set from an +// existing slice. Operations on the resulting set are thread-safe. 
+func NewSetFromSlice(s []interface{}) Set { + a := NewSet(s...) + return a +} + +// NewThreadUnsafeSet creates and returns a reference to an empty set. +// Operations on the resulting set are not thread-safe. +func NewThreadUnsafeSet() Set { + set := newThreadUnsafeSet() + return &set +} + +// NewThreadUnsafeSetFromSlice creates and returns a reference to a +// set from an existing slice. Operations on the resulting set are +// not thread-safe. +func NewThreadUnsafeSetFromSlice(s []interface{}) Set { + a := NewThreadUnsafeSet() + for _, item := range s { + a.Add(item) + } + return a +} diff --git a/vendor/github.com/deckarep/golang-set/threadsafe.go b/vendor/github.com/deckarep/golang-set/threadsafe.go new file mode 100644 index 000000000000..002e06af1f42 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/threadsafe.go @@ -0,0 +1,277 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import "sync" + +type threadSafeSet struct { + s threadUnsafeSet + sync.RWMutex +} + +func newThreadSafeSet() threadSafeSet { + return threadSafeSet{s: newThreadUnsafeSet()} +} + +func (set *threadSafeSet) Add(i interface{}) bool { + set.Lock() + ret := set.s.Add(i) + set.Unlock() + return ret +} + +func (set *threadSafeSet) Contains(i ...interface{}) bool { + set.RLock() + ret := set.s.Contains(i...) 
+ set.RUnlock() + return ret +} + +func (set *threadSafeSet) IsSubset(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + ret := set.s.IsSubset(&o.s) + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) IsProperSubset(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + defer set.RUnlock() + o.RLock() + defer o.RUnlock() + + return set.s.IsProperSubset(&o.s) +} + +func (set *threadSafeSet) IsSuperset(other Set) bool { + return other.IsSubset(set) +} + +func (set *threadSafeSet) IsProperSuperset(other Set) bool { + return other.IsProperSubset(set) +} + +func (set *threadSafeSet) Union(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeUnion} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Intersect(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeIntersection} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Difference(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeDifference} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) SymmetricDifference(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeDifference} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Clear() { + set.Lock() + set.s = newThreadUnsafeSet() + set.Unlock() +} + +func (set *threadSafeSet) Remove(i interface{}) { + set.Lock() + delete(set.s, i) + set.Unlock() +} + +func (set *threadSafeSet) Cardinality() int { + set.RLock() + defer set.RUnlock() + return len(set.s) +} + +func (set *threadSafeSet) Each(cb func(interface{}) bool) { + set.RLock() + for elem := range set.s { + if cb(elem) { + break + } + } + set.RUnlock() +} + +func (set *threadSafeSet) Iter() <-chan interface{} { + ch := make(chan interface{}) + go func() { + set.RLock() + + for elem := range set.s { + ch <- elem + } + close(ch) + set.RUnlock() + }() + + return ch +} + +func (set *threadSafeSet) Iterator() *Iterator { + iterator, ch, stopCh := newIterator() + + go func() { + set.RLock() + L: + for elem := range set.s { + select { + case <-stopCh: + break L + case ch <- elem: + } + } + close(ch) + set.RUnlock() + }() + + return iterator +} + +func (set *threadSafeSet) Equal(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + ret := set.s.Equal(&o.s) + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Clone() Set { + set.RLock() + + unsafeClone := set.s.Clone().(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeClone} + set.RUnlock() + return ret +} + +func (set *threadSafeSet) String() string { + set.RLock() + ret := set.s.String() + set.RUnlock() + return ret +} + +func (set *threadSafeSet) PowerSet() Set { + set.RLock() + ret := set.s.PowerSet() + set.RUnlock() + return ret +} + +func (set *threadSafeSet) Pop() interface{} { + set.Lock() + defer set.Unlock() + return set.s.Pop() +} + +func (set *threadSafeSet) CartesianProduct(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeCartProduct := 
set.s.CartesianProduct(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeCartProduct} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) ToSlice() []interface{} { + keys := make([]interface{}, 0, set.Cardinality()) + set.RLock() + for elem := range set.s { + keys = append(keys, elem) + } + set.RUnlock() + return keys +} + +func (set *threadSafeSet) MarshalJSON() ([]byte, error) { + set.RLock() + b, err := set.s.MarshalJSON() + set.RUnlock() + + return b, err +} + +func (set *threadSafeSet) UnmarshalJSON(p []byte) error { + set.RLock() + err := set.s.UnmarshalJSON(p) + set.RUnlock() + + return err +} diff --git a/vendor/github.com/deckarep/golang-set/threadunsafe.go b/vendor/github.com/deckarep/golang-set/threadunsafe.go new file mode 100644 index 000000000000..10bdd46f155c --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/threadunsafe.go @@ -0,0 +1,337 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +type threadUnsafeSet map[interface{}]struct{} + +// An OrderedPair represents a 2-tuple of values. +type OrderedPair struct { + First interface{} + Second interface{} +} + +func newThreadUnsafeSet() threadUnsafeSet { + return make(threadUnsafeSet) +} + +// Equal says whether two 2-tuples contain the same values in the same order. 
+func (pair *OrderedPair) Equal(other OrderedPair) bool { + if pair.First == other.First && + pair.Second == other.Second { + return true + } + + return false +} + +func (set *threadUnsafeSet) Add(i interface{}) bool { + _, found := (*set)[i] + if found { + return false //False if it existed already + } + + (*set)[i] = struct{}{} + return true +} + +func (set *threadUnsafeSet) Contains(i ...interface{}) bool { + for _, val := range i { + if _, ok := (*set)[val]; !ok { + return false + } + } + return true +} + +func (set *threadUnsafeSet) IsSubset(other Set) bool { + _ = other.(*threadUnsafeSet) + for elem := range *set { + if !other.Contains(elem) { + return false + } + } + return true +} + +func (set *threadUnsafeSet) IsProperSubset(other Set) bool { + return set.IsSubset(other) && !set.Equal(other) +} + +func (set *threadUnsafeSet) IsSuperset(other Set) bool { + return other.IsSubset(set) +} + +func (set *threadUnsafeSet) IsProperSuperset(other Set) bool { + return set.IsSuperset(other) && !set.Equal(other) +} + +func (set *threadUnsafeSet) Union(other Set) Set { + o := other.(*threadUnsafeSet) + + unionedSet := newThreadUnsafeSet() + + for elem := range *set { + unionedSet.Add(elem) + } + for elem := range *o { + unionedSet.Add(elem) + } + return &unionedSet +} + +func (set *threadUnsafeSet) Intersect(other Set) Set { + o := other.(*threadUnsafeSet) + + intersection := newThreadUnsafeSet() + // loop over smaller set + if set.Cardinality() < other.Cardinality() { + for elem := range *set { + if other.Contains(elem) { + intersection.Add(elem) + } + } + } else { + for elem := range *o { + if set.Contains(elem) { + intersection.Add(elem) + } + } + } + return &intersection +} + +func (set *threadUnsafeSet) Difference(other Set) Set { + _ = other.(*threadUnsafeSet) + + difference := newThreadUnsafeSet() + for elem := range *set { + if !other.Contains(elem) { + difference.Add(elem) + } + } + return &difference +} + +func (set *threadUnsafeSet) SymmetricDifference(other Set) Set { + _ = other.(*threadUnsafeSet) + + aDiff := set.Difference(other) + bDiff := other.Difference(set) + return aDiff.Union(bDiff) +} + +func (set *threadUnsafeSet) Clear() { + *set = newThreadUnsafeSet() +} + +func (set *threadUnsafeSet) Remove(i interface{}) { + delete(*set, i) +} + +func (set *threadUnsafeSet) Cardinality() int { + return len(*set) +} + +func (set *threadUnsafeSet) Each(cb func(interface{}) bool) { + for elem := range *set { + if cb(elem) { + break + } + } +} + +func (set *threadUnsafeSet) Iter() <-chan interface{} { + ch := make(chan interface{}) + go func() { + for elem := range *set { + ch <- elem + } + close(ch) + }() + + return ch +} + +func (set *threadUnsafeSet) Iterator() *Iterator { + iterator, ch, stopCh := newIterator() + + go func() { + L: + for elem := range *set { + select { + case <-stopCh: + break L + case ch <- elem: + } + } + close(ch) + }() + + return iterator +} + +func (set *threadUnsafeSet) Equal(other Set) bool { + _ = other.(*threadUnsafeSet) + + if set.Cardinality() != other.Cardinality() { + return false + } + for elem := range *set { + if !other.Contains(elem) { + return false + } + } + return true +} + +func (set *threadUnsafeSet) Clone() Set { + clonedSet := newThreadUnsafeSet() + for elem := range *set { + clonedSet.Add(elem) + } + return &clonedSet +} + +func (set *threadUnsafeSet) String() string { + items := make([]string, 0, len(*set)) + + for elem := range *set { + items = append(items, fmt.Sprintf("%v", elem)) + } + return fmt.Sprintf("Set{%s}", strings.Join(items, 
", ")) +} + +// String outputs a 2-tuple in the form "(A, B)". +func (pair OrderedPair) String() string { + return fmt.Sprintf("(%v, %v)", pair.First, pair.Second) +} + +func (set *threadUnsafeSet) Pop() interface{} { + for item := range *set { + delete(*set, item) + return item + } + return nil +} + +func (set *threadUnsafeSet) PowerSet() Set { + powSet := NewThreadUnsafeSet() + nullset := newThreadUnsafeSet() + powSet.Add(&nullset) + + for es := range *set { + u := newThreadUnsafeSet() + j := powSet.Iter() + for er := range j { + p := newThreadUnsafeSet() + if reflect.TypeOf(er).Name() == "" { + k := er.(*threadUnsafeSet) + for ek := range *(k) { + p.Add(ek) + } + } else { + p.Add(er) + } + p.Add(es) + u.Add(&p) + } + + powSet = powSet.Union(&u) + } + + return powSet +} + +func (set *threadUnsafeSet) CartesianProduct(other Set) Set { + o := other.(*threadUnsafeSet) + cartProduct := NewThreadUnsafeSet() + + for i := range *set { + for j := range *o { + elem := OrderedPair{First: i, Second: j} + cartProduct.Add(elem) + } + } + + return cartProduct +} + +func (set *threadUnsafeSet) ToSlice() []interface{} { + keys := make([]interface{}, 0, set.Cardinality()) + for elem := range *set { + keys = append(keys, elem) + } + + return keys +} + +// MarshalJSON creates a JSON array from the set, it marshals all elements +func (set *threadUnsafeSet) MarshalJSON() ([]byte, error) { + items := make([]string, 0, set.Cardinality()) + + for elem := range *set { + b, err := json.Marshal(elem) + if err != nil { + return nil, err + } + + items = append(items, string(b)) + } + + return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil +} + +// UnmarshalJSON recreates a set from a JSON array, it only decodes +// primitive types. Numbers are decoded as json.Number. +func (set *threadUnsafeSet) UnmarshalJSON(b []byte) error { + var i []interface{} + + d := json.NewDecoder(bytes.NewReader(b)) + d.UseNumber() + err := d.Decode(&i) + if err != nil { + return err + } + + for _, v := range i { + switch t := v.(type) { + case []interface{}, map[string]interface{}: + continue + default: + set.Add(t) + } + } + + return nil +} diff --git a/vendor/github.com/mohae/deepcopy/LICENSE b/vendor/github.com/mohae/deepcopy/LICENSE new file mode 100644 index 000000000000..419673f005e6 --- /dev/null +++ b/vendor/github.com/mohae/deepcopy/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Joel + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/mohae/deepcopy/README.md b/vendor/github.com/mohae/deepcopy/README.md new file mode 100644 index 000000000000..f81841885b4e --- /dev/null +++ b/vendor/github.com/mohae/deepcopy/README.md @@ -0,0 +1,8 @@ +deepCopy +======== +[![GoDoc](https://godoc.org/github.com/mohae/deepcopy?status.svg)](https://godoc.org/github.com/mohae/deepcopy)[![Build Status](https://travis-ci.org/mohae/deepcopy.png)](https://travis-ci.org/mohae/deepcopy) + +DeepCopy makes deep copies of things: unexported field values are not copied. + +## Usage + cpy := deepcopy.Copy(orig) diff --git a/vendor/github.com/mohae/deepcopy/deepcopy.go b/vendor/github.com/mohae/deepcopy/deepcopy.go new file mode 100644 index 000000000000..ba763ad091e2 --- /dev/null +++ b/vendor/github.com/mohae/deepcopy/deepcopy.go @@ -0,0 +1,125 @@ +// deepcopy makes deep copies of things. A standard copy will copy the +// pointers: deep copy copies the values pointed to. Unexported field +// values are not copied. +// +// Copyright (c)2014-2016, Joel Scoble (github.com/mohae), all rights reserved. +// License: MIT, for more details check the included LICENSE file. +package deepcopy + +import ( + "reflect" + "time" +) + +// Interface for delegating copy process to type +type Interface interface { + DeepCopy() interface{} +} + +// Iface is an alias to Copy; this exists for backwards compatibility reasons. +func Iface(iface interface{}) interface{} { + return Copy(iface) +} + +// Copy creates a deep copy of whatever is passed to it and returns the copy +// in an interface{}. The returned value will need to be asserted to the +// correct type. +func Copy(src interface{}) interface{} { + if src == nil { + return nil + } + + // Make the interface a reflect.Value + original := reflect.ValueOf(src) + + // Make a copy of the same type as the original. + cpy := reflect.New(original.Type()).Elem() + + // Recursively copy the original. + copyRecursive(original, cpy) + + // Return the copy as an interface. + return cpy.Interface() +} + +// copyRecursive does the actual copying of the interface. It currently has +// limited support for what it can handle. Add as needed. +func copyRecursive(original, cpy reflect.Value) { + // check for implement deepcopy.Interface + if original.CanInterface() { + if copier, ok := original.Interface().(Interface); ok { + cpy.Set(reflect.ValueOf(copier.DeepCopy())) + return + } + } + + // handle according to original's Kind + switch original.Kind() { + case reflect.Ptr: + // Get the actual value being pointed to. + originalValue := original.Elem() + + // if it isn't valid, return. + if !originalValue.IsValid() { + return + } + cpy.Set(reflect.New(originalValue.Type())) + copyRecursive(originalValue, cpy.Elem()) + + case reflect.Interface: + // If this is a nil, don't do anything + if original.IsNil() { + return + } + // Get the value for the interface, not the pointer. + originalValue := original.Elem() + + // Get the value by calling Elem(). + copyValue := reflect.New(originalValue.Type()).Elem() + copyRecursive(originalValue, copyValue) + cpy.Set(copyValue) + + case reflect.Struct: + t, ok := original.Interface().(time.Time) + if ok { + cpy.Set(reflect.ValueOf(t)) + return + } + // Go through each field of the struct and copy it. 
+ for i := 0; i < original.NumField(); i++ { + // The Type's StructField for a given field is checked to see if StructField.PkgPath + // is set to determine if the field is exported or not because CanSet() returns false + // for settable fields. I'm not sure why. -mohae + if original.Type().Field(i).PkgPath != "" { + continue + } + copyRecursive(original.Field(i), cpy.Field(i)) + } + + case reflect.Slice: + if original.IsNil() { + return + } + // Make a new slice and copy each element. + cpy.Set(reflect.MakeSlice(original.Type(), original.Len(), original.Cap())) + for i := 0; i < original.Len(); i++ { + copyRecursive(original.Index(i), cpy.Index(i)) + } + + case reflect.Map: + if original.IsNil() { + return + } + cpy.Set(reflect.MakeMap(original.Type())) + for _, key := range original.MapKeys() { + originalValue := original.MapIndex(key) + copyValue := reflect.New(originalValue.Type()).Elem() + copyRecursive(originalValue, copyValue) + copyKey := Copy(key.Interface()) + cpy.SetMapIndex(reflect.ValueOf(copyKey), copyValue) + } + + default: + cpy.Set(original) + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md new file mode 100644 index 000000000000..1fc9fdf7f36a --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md @@ -0,0 +1,14 @@ +Changes by Version +================== + +1.1.0 (unreleased) +------------------- + +- Deprecate InitGlobalTracer() in favor of SetGlobalTracer() + + +1.0.0 (2016-09-26) +------------------- + +- This release implements OpenTracing Specification 1.0 (http://opentracing.io/spec) + diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE similarity index 94% rename from vendor/github.com/Azure/go-autorest/LICENSE rename to vendor/github.com/opentracing/opentracing-go/LICENSE index b9d6a27ea92e..f0027349e830 100644 --- a/vendor/github.com/Azure/go-autorest/LICENSE +++ b/vendor/github.com/opentracing/opentracing-go/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -176,7 +175,18 @@ END OF TERMS AND CONDITIONS - Copyright 2015 Microsoft Corporation + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The OpenTracing Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile new file mode 100644 index 000000000000..d49a5c0d44fb --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/Makefile @@ -0,0 +1,32 @@ +PACKAGES := . ./mocktracer/... ./ext/... + +.DEFAULT_GOAL := test-and-lint + +.PHONE: test-and-lint + +test-and-lint: test lint + +.PHONY: test +test: + go test -v -cover -race ./... 
+ +cover: + @rm -rf cover-all.out + $(foreach pkg, $(PACKAGES), $(MAKE) cover-pkg PKG=$(pkg) || true;) + @grep mode: cover.out > coverage.out + @cat cover-all.out >> coverage.out + go tool cover -html=coverage.out -o cover.html + @rm -rf cover.out cover-all.out coverage.out + +cover-pkg: + go test -coverprofile cover.out $(PKG) + @grep -v mode: cover.out >> cover-all.out + +.PHONY: lint +lint: + go fmt ./... + golint ./... + @# Run again with magic to exit non-zero if golint outputs anything. + @! (golint ./... | read dummy) + go vet ./... + diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md new file mode 100644 index 000000000000..007ee237c8a1 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/README.md @@ -0,0 +1,171 @@ +[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) + +# OpenTracing API for Go + +This package is a Go platform API for OpenTracing. + +## Required Reading + +In order to understand the Go platform API, one must first be familiar with the +[OpenTracing project](http://opentracing.io) and +[terminology](http://opentracing.io/documentation/pages/spec.html) more specifically. + +## API overview for those adding instrumentation + +Everyday consumers of this `opentracing` package really only need to worry +about a couple of key abstractions: the `StartSpan` function, the `Span` +interface, and binding a `Tracer` at `main()`-time. Here are code snippets +demonstrating some important use cases. + +#### Singleton initialization + +The simplest starting point is `./default_tracer.go`. As early as possible, call + +```go + import "github.com/opentracing/opentracing-go" + import ".../some_tracing_impl" + + func main() { + opentracing.SetGlobalTracer( + // tracing impl specific: + some_tracing_impl.New(...), + ) + ... + } +``` + +#### Non-Singleton initialization + +If you prefer direct control to singletons, manage ownership of the +`opentracing.Tracer` implementation explicitly. + +#### Creating a Span given an existing Go `context.Context` + +If you use `context.Context` in your application, OpenTracing's Go library will +happily rely on it for `Span` propagation. To start a new (blocking child) +`Span`, you can use `StartSpanFromContext`. + +```go + func xyz(ctx context.Context, ...) { + ... + span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name") + defer span.Finish() + span.LogFields( + log.String("event", "soft error"), + log.String("type", "cache timeout"), + log.Int("waited.millis", 1500)) + ... + } +``` + +#### Starting an empty trace by creating a "root span" + +It's always possible to create a "root" `Span` with no parent or other causal +reference. + +```go + func xyz() { + ... + sp := opentracing.StartSpan("operation_name") + defer sp.Finish() + ... + } +``` + +#### Creating a (child) Span given an existing (parent) Span + +```go + func xyz(parentSpan opentracing.Span, ...) { + ... 
+ sp := opentracing.StartSpan( + "operation_name", + opentracing.ChildOf(parentSpan.Context())) + defer sp.Finish() + ... + } +``` + +#### Serializing to the wire + +```go + func makeSomeRequest(ctx context.Context) ... { + if span := opentracing.SpanFromContext(ctx); span != nil { + httpClient := &http.Client{} + httpReq, _ := http.NewRequest("GET", "http://myservice/", nil) + + // Transmit the span's TraceContext as HTTP headers on our + // outbound request. + opentracing.GlobalTracer().Inject( + span.Context(), + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(httpReq.Header)) + + resp, err := httpClient.Do(httpReq) + ... + } + ... + } +``` + +#### Deserializing from the wire + +```go + http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + var serverSpan opentracing.Span + appSpecificOperationName := ... + wireContext, err := opentracing.GlobalTracer().Extract( + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(req.Header)) + if err != nil { + // Optionally record something about err here + } + + // Create the span referring to the RPC client if available. + // If wireContext == nil, a root span will be created. + serverSpan = opentracing.StartSpan( + appSpecificOperationName, + ext.RPCServerOption(wireContext)) + + defer serverSpan.Finish() + + ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) + ... + } +``` + +#### Conditionally capture a field using `log.Noop` + +In some situations, you may want to dynamically decide whether or not +to log a field. For example, you may want to capture additional data, +such as a customer ID, in non-production environments: + +```go + func Customer(order *Order) log.Field { + if os.Getenv("ENVIRONMENT") == "dev" { + return log.String("customer", order.Customer.ID) + } + return log.Noop() + } +``` + +#### Goroutine-safety + +The entire public API is goroutine-safe and does not require external +synchronization. + +## API pointers for those implementing a tracing system + +Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. + +## API compatibility + +For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. + +## Tracer test suite + +A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. + +## Licensing + +[Apache 2.0 License](./LICENSE). diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go new file mode 100644 index 000000000000..8800129a237d --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go @@ -0,0 +1,210 @@ +package ext + +import opentracing "github.com/opentracing/opentracing-go" + +// These constants define common tag names recommended for better portability across +// tracing systems and languages/platforms. 
+// +// The tag names are defined as typed strings, so that in addition to the usual use +// +// span.setTag(TagName, value) +// +// they also support value type validation via this additional syntax: +// +// TagName.Set(span, value) +// +var ( + ////////////////////////////////////////////////////////////////////// + // SpanKind (client/server or producer/consumer) + ////////////////////////////////////////////////////////////////////// + + // SpanKind hints at relationship between spans, e.g. client/server + SpanKind = spanKindTagName("span.kind") + + // SpanKindRPCClient marks a span representing the client-side of an RPC + // or other remote call + SpanKindRPCClientEnum = SpanKindEnum("client") + SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum} + + // SpanKindRPCServer marks a span representing the server-side of an RPC + // or other remote call + SpanKindRPCServerEnum = SpanKindEnum("server") + SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} + + // SpanKindProducer marks a span representing the producer-side of a + // message bus + SpanKindProducerEnum = SpanKindEnum("producer") + SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} + + // SpanKindConsumer marks a span representing the consumer-side of a + // message bus + SpanKindConsumerEnum = SpanKindEnum("consumer") + SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} + + ////////////////////////////////////////////////////////////////////// + // Component name + ////////////////////////////////////////////////////////////////////// + + // Component is a low-cardinality identifier of the module, library, + // or package that is generating a span. + Component = stringTagName("component") + + ////////////////////////////////////////////////////////////////////// + // Sampling hint + ////////////////////////////////////////////////////////////////////// + + // SamplingPriority determines the priority of sampling this Span. + SamplingPriority = uint16TagName("sampling.priority") + + ////////////////////////////////////////////////////////////////////// + // Peer tags. These tags can be emitted by either client-side of + // server-side to describe the other side/service in a peer-to-peer + // communications, like an RPC call. + ////////////////////////////////////////////////////////////////////// + + // PeerService records the service name of the peer. + PeerService = stringTagName("peer.service") + + // PeerAddress records the address name of the peer. This may be a "ip:port", + // a bare "hostname", a FQDN or even a database DSN substring + // like "mysql://username@127.0.0.1:3306/dbname" + PeerAddress = stringTagName("peer.address") + + // PeerHostname records the host name of the peer + PeerHostname = stringTagName("peer.hostname") + + // PeerHostIPv4 records IP v4 host address of the peer + PeerHostIPv4 = ipv4Tag("peer.ipv4") + + // PeerHostIPv6 records IP v6 host address of the peer + PeerHostIPv6 = stringTagName("peer.ipv6") + + // PeerPort records port number of the peer + PeerPort = uint16TagName("peer.port") + + ////////////////////////////////////////////////////////////////////// + // HTTP Tags + ////////////////////////////////////////////////////////////////////// + + // HTTPUrl should be the URL of the request being handled in this segment + // of the trace, in standard URI format. The protocol is optional. 
+ HTTPUrl = stringTagName("http.url") + + // HTTPMethod is the HTTP method of the request, and is case-insensitive. + HTTPMethod = stringTagName("http.method") + + // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the + // HTTP response. + HTTPStatusCode = uint16TagName("http.status_code") + + ////////////////////////////////////////////////////////////////////// + // DB Tags + ////////////////////////////////////////////////////////////////////// + + // DBInstance is database instance name. + DBInstance = stringTagName("db.instance") + + // DBStatement is a database statement for the given database type. + // It can be a query or a prepared statement (i.e., before substitution). + DBStatement = stringTagName("db.statement") + + // DBType is a database type. For any SQL database, "sql". + // For others, the lower-case database category, e.g. "redis" + DBType = stringTagName("db.type") + + // DBUser is a username for accessing database. + DBUser = stringTagName("db.user") + + ////////////////////////////////////////////////////////////////////// + // Message Bus Tag + ////////////////////////////////////////////////////////////////////// + + // MessageBusDestination is an address at which messages can be exchanged + MessageBusDestination = stringTagName("message_bus.destination") + + ////////////////////////////////////////////////////////////////////// + // Error Tag + ////////////////////////////////////////////////////////////////////// + + // Error indicates that operation represented by the span resulted in an error. + Error = boolTagName("error") +) + +// --- + +// SpanKindEnum represents common span types +type SpanKindEnum string + +type spanKindTagName string + +// Set adds a string tag to the `span` +func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { + span.SetTag(string(tag), value) +} + +type rpcServerOption struct { + clientContext opentracing.SpanContext +} + +func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { + if r.clientContext != nil { + opentracing.ChildOf(r.clientContext).Apply(o) + } + SpanKindRPCServer.Apply(o) +} + +// RPCServerOption returns a StartSpanOption appropriate for an RPC server span +// with `client` representing the metadata for the remote peer Span if available. +// In case client == nil, due to the client not being instrumented, this RPC +// server span will be a root span. 
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { + return rpcServerOption{client} +} + +// --- + +type stringTagName string + +// Set adds a string tag to the `span` +func (tag stringTagName) Set(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} + +// --- + +type uint32TagName string + +// Set adds a uint32 tag to the `span` +func (tag uint32TagName) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// --- + +type uint16TagName string + +// Set adds a uint16 tag to the `span` +func (tag uint16TagName) Set(span opentracing.Span, value uint16) { + span.SetTag(string(tag), value) +} + +// --- + +type boolTagName string + +// Add adds a bool tag to the `span` +func (tag boolTagName) Set(span opentracing.Span, value bool) { + span.SetTag(string(tag), value) +} + +type ipv4Tag string + +// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility +func (tag ipv4Tag) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1" +func (tag ipv4Tag) SetString(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go new file mode 100644 index 000000000000..8c8e793ff23c --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -0,0 +1,32 @@ +package opentracing + +var ( + globalTracer Tracer = NoopTracer{} +) + +// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by +// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an +// opentracing.Tracer instance) should call SetGlobalTracer as early as +// possible in main(), prior to calling the `StartSpan` global func below. +// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` +// (etc) globals are noops. +func SetGlobalTracer(tracer Tracer) { + globalTracer = tracer +} + +// GlobalTracer returns the global singleton `Tracer` implementation. +// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop +// implementation that drops all data handed to it. +func GlobalTracer() Tracer { + return globalTracer +} + +// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. +func StartSpan(operationName string, opts ...StartSpanOption) Span { + return globalTracer.StartSpan(operationName, opts...) +} + +// InitGlobalTracer is deprecated. Please use SetGlobalTracer. +func InitGlobalTracer(tracer Tracer) { + SetGlobalTracer(tracer) +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go new file mode 100644 index 000000000000..05a62e70b11a --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -0,0 +1,54 @@ +package opentracing + +import "context" + +type contextKey struct{} + +var activeSpanKey = contextKey{} + +// ContextWithSpan returns a new `context.Context` that holds a reference to +// `span`'s SpanContext. +func ContextWithSpan(ctx context.Context, span Span) context.Context { + return context.WithValue(ctx, activeSpanKey, span) +} + +// SpanFromContext returns the `Span` previously associated with `ctx`, or +// `nil` if no such `Span` could be found. 
+// +// NOTE: context.Context != SpanContext: the former is Go's intra-process +// context propagation mechanism, and the latter houses OpenTracing's per-Span +// identity and baggage information. +func SpanFromContext(ctx context.Context) Span { + val := ctx.Value(activeSpanKey) + if sp, ok := val.(Span); ok { + return sp + } + return nil +} + +// StartSpanFromContext starts and returns a Span with `operationName`, using +// any Span found within `ctx` as a ChildOfRef. If no such parent could be +// found, StartSpanFromContext creates a root (parentless) Span. +// +// The second return value is a context.Context object built around the +// returned Span. +// +// Example usage: +// +// SomeFunction(ctx context.Context, ...) { +// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") +// defer sp.Finish() +// ... +// } +func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { + return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) +} + +// startSpanFromContextWithTracer is factored out for testing purposes. +func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { + if parentSpan := SpanFromContext(ctx); parentSpan != nil { + opts = append(opts, ChildOf(parentSpan.Context())) + } + span := tracer.StartSpan(operationName, opts...) + return span, ContextWithSpan(ctx, span) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go new file mode 100644 index 000000000000..50feea341a73 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -0,0 +1,269 @@ +package log + +import ( + "fmt" + "math" +) + +type fieldType int + +const ( + stringType fieldType = iota + boolType + intType + int32Type + uint32Type + int64Type + uint64Type + float32Type + float64Type + errorType + objectType + lazyLoggerType + noopType +) + +// Field instances are constructed via LogBool, LogString, and so on. +// Tracing implementations may then handle them via the Field.Marshal +// method. 
+// +// "heavily influenced by" (i.e., partially stolen from) +// https://github.com/uber-go/zap +type Field struct { + key string + fieldType fieldType + numericVal int64 + stringVal string + interfaceVal interface{} +} + +// String adds a string-valued key:value pair to a Span.LogFields() record +func String(key, val string) Field { + return Field{ + key: key, + fieldType: stringType, + stringVal: val, + } +} + +// Bool adds a bool-valued key:value pair to a Span.LogFields() record +func Bool(key string, val bool) Field { + var numericVal int64 + if val { + numericVal = 1 + } + return Field{ + key: key, + fieldType: boolType, + numericVal: numericVal, + } +} + +// Int adds an int-valued key:value pair to a Span.LogFields() record +func Int(key string, val int) Field { + return Field{ + key: key, + fieldType: intType, + numericVal: int64(val), + } +} + +// Int32 adds an int32-valued key:value pair to a Span.LogFields() record +func Int32(key string, val int32) Field { + return Field{ + key: key, + fieldType: int32Type, + numericVal: int64(val), + } +} + +// Int64 adds an int64-valued key:value pair to a Span.LogFields() record +func Int64(key string, val int64) Field { + return Field{ + key: key, + fieldType: int64Type, + numericVal: val, + } +} + +// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record +func Uint32(key string, val uint32) Field { + return Field{ + key: key, + fieldType: uint32Type, + numericVal: int64(val), + } +} + +// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record +func Uint64(key string, val uint64) Field { + return Field{ + key: key, + fieldType: uint64Type, + numericVal: int64(val), + } +} + +// Float32 adds a float32-valued key:value pair to a Span.LogFields() record +func Float32(key string, val float32) Field { + return Field{ + key: key, + fieldType: float32Type, + numericVal: int64(math.Float32bits(val)), + } +} + +// Float64 adds a float64-valued key:value pair to a Span.LogFields() record +func Float64(key string, val float64) Field { + return Field{ + key: key, + fieldType: float64Type, + numericVal: int64(math.Float64bits(val)), + } +} + +// Error adds an error with the key "error" to a Span.LogFields() record +func Error(err error) Field { + return Field{ + key: "error", + fieldType: errorType, + interfaceVal: err, + } +} + +// Object adds an object-valued key:value pair to a Span.LogFields() record +func Object(key string, obj interface{}) Field { + return Field{ + key: key, + fieldType: objectType, + interfaceVal: obj, + } +} + +// LazyLogger allows for user-defined, late-bound logging of arbitrary data +type LazyLogger func(fv Encoder) + +// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing +// implementation will call the LazyLogger function at an indefinite time in +// the future (after Lazy() returns). +func Lazy(ll LazyLogger) Field { + return Field{ + fieldType: lazyLoggerType, + interfaceVal: ll, + } +} + +// Noop creates a no-op log field that should be ignored by the tracer. 
+// It can be used to capture optional fields, for example those that should +// only be logged in non-production environment: +// +// func customerField(order *Order) log.Field { +// if os.Getenv("ENVIRONMENT") == "dev" { +// return log.String("customer", order.Customer.ID) +// } +// return log.Noop() +// } +// +// span.LogFields(log.String("event", "purchase"), customerField(order)) +// +func Noop() Field { + return Field{ + fieldType: noopType, + } +} + +// Encoder allows access to the contents of a Field (via a call to +// Field.Marshal). +// +// Tracer implementations typically provide an implementation of Encoder; +// OpenTracing callers typically do not need to concern themselves with it. +type Encoder interface { + EmitString(key, value string) + EmitBool(key string, value bool) + EmitInt(key string, value int) + EmitInt32(key string, value int32) + EmitInt64(key string, value int64) + EmitUint32(key string, value uint32) + EmitUint64(key string, value uint64) + EmitFloat32(key string, value float32) + EmitFloat64(key string, value float64) + EmitObject(key string, value interface{}) + EmitLazyLogger(value LazyLogger) +} + +// Marshal passes a Field instance through to the appropriate +// field-type-specific method of an Encoder. +func (lf Field) Marshal(visitor Encoder) { + switch lf.fieldType { + case stringType: + visitor.EmitString(lf.key, lf.stringVal) + case boolType: + visitor.EmitBool(lf.key, lf.numericVal != 0) + case intType: + visitor.EmitInt(lf.key, int(lf.numericVal)) + case int32Type: + visitor.EmitInt32(lf.key, int32(lf.numericVal)) + case int64Type: + visitor.EmitInt64(lf.key, int64(lf.numericVal)) + case uint32Type: + visitor.EmitUint32(lf.key, uint32(lf.numericVal)) + case uint64Type: + visitor.EmitUint64(lf.key, uint64(lf.numericVal)) + case float32Type: + visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) + case float64Type: + visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) + case errorType: + if err, ok := lf.interfaceVal.(error); ok { + visitor.EmitString(lf.key, err.Error()) + } else { + visitor.EmitString(lf.key, "") + } + case objectType: + visitor.EmitObject(lf.key, lf.interfaceVal) + case lazyLoggerType: + visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) + case noopType: + // intentionally left blank + } +} + +// Key returns the field's key. +func (lf Field) Key() string { + return lf.key +} + +// Value returns the field's value as interface{}. +func (lf Field) Value() interface{} { + switch lf.fieldType { + case stringType: + return lf.stringVal + case boolType: + return lf.numericVal != 0 + case intType: + return int(lf.numericVal) + case int32Type: + return int32(lf.numericVal) + case int64Type: + return int64(lf.numericVal) + case uint32Type: + return uint32(lf.numericVal) + case uint64Type: + return uint64(lf.numericVal) + case float32Type: + return math.Float32frombits(uint32(lf.numericVal)) + case float64Type: + return math.Float64frombits(uint64(lf.numericVal)) + case errorType, objectType, lazyLoggerType: + return lf.interfaceVal + case noopType: + return nil + default: + return nil + } +} + +// String returns a string representation of the key and value. 
+func (lf Field) String() string { + return fmt.Sprint(lf.key, ":", lf.Value()) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go new file mode 100644 index 000000000000..3832feb5ceb2 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/util.go @@ -0,0 +1,54 @@ +package log + +import "fmt" + +// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice +// a la Span.LogFields(). +func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { + if len(keyValues)%2 != 0 { + return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) + } + fields := make([]Field, len(keyValues)/2) + for i := 0; i*2 < len(keyValues); i++ { + key, ok := keyValues[i*2].(string) + if !ok { + return nil, fmt.Errorf( + "non-string key (pair #%d): %T", + i, keyValues[i*2]) + } + switch typedVal := keyValues[i*2+1].(type) { + case bool: + fields[i] = Bool(key, typedVal) + case string: + fields[i] = String(key, typedVal) + case int: + fields[i] = Int(key, typedVal) + case int8: + fields[i] = Int32(key, int32(typedVal)) + case int16: + fields[i] = Int32(key, int32(typedVal)) + case int32: + fields[i] = Int32(key, typedVal) + case int64: + fields[i] = Int64(key, typedVal) + case uint: + fields[i] = Uint64(key, uint64(typedVal)) + case uint64: + fields[i] = Uint64(key, typedVal) + case uint8: + fields[i] = Uint32(key, uint32(typedVal)) + case uint16: + fields[i] = Uint32(key, uint32(typedVal)) + case uint32: + fields[i] = Uint32(key, typedVal) + case float32: + fields[i] = Float32(key, typedVal) + case float64: + fields[i] = Float64(key, typedVal) + default: + // When in doubt, coerce to a string + fields[i] = String(key, fmt.Sprint(typedVal)) + } + } + return fields, nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go new file mode 100644 index 000000000000..0d32f692c410 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/noop.go @@ -0,0 +1,64 @@ +package opentracing + +import "github.com/opentracing/opentracing-go/log" + +// A NoopTracer is a trivial, minimum overhead implementation of Tracer +// for which all operations are no-ops. +// +// The primary use of this implementation is in libraries, such as RPC +// frameworks, that make tracing an optional feature controlled by the +// end user. A no-op implementation allows said libraries to use it +// as the default Tracer and to write instrumentation that does +// not need to keep checking if the tracer instance is nil. +// +// For the same reason, the NoopTracer is the default "global" tracer +// (see GlobalTracer and SetGlobalTracer functions). +// +// WARNING: NoopTracer does not support baggage propagation. 
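As a sketch only (not part of the vendored file), this is roughly how a tracer's Span.LogKV could lean on the InterleavedKVToFields helper above; the logKV wrapper and its error message are illustrative assumptions, not library code.

    package main

    import (
        "fmt"

        opentracing "github.com/opentracing/opentracing-go"
        "github.com/opentracing/opentracing-go/log"
    )

    // logKV mirrors what a Span.LogKV implementation might do internally:
    // convert the alternating key/value list into typed Fields and hand
    // them to LogFields, as the note to implementors suggests.
    func logKV(sp opentracing.Span, keyValues ...interface{}) error {
        fields, err := log.InterleavedKVToFields(keyValues...)
        if err != nil {
            // Odd-length input or a non-string key ends up here.
            return fmt.Errorf("bad LogKV arguments: %v", err)
        }
        sp.LogFields(fields...)
        return nil
    }

    func main() {
        sp := opentracing.GlobalTracer().StartSpan("example") // NoopTracer by default
        defer sp.Finish()
        if err := logKV(sp, "event", "soft error", "waited.millis", 1500); err != nil {
            fmt.Println(err)
        }
    }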
+type NoopTracer struct{} + +type noopSpan struct{} +type noopSpanContext struct{} + +var ( + defaultNoopSpanContext = noopSpanContext{} + defaultNoopSpan = noopSpan{} + defaultNoopTracer = NoopTracer{} +) + +const ( + emptyString = "" +) + +// noopSpanContext: +func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +// noopSpan: +func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } +func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan } +func (n noopSpan) BaggageItem(key string) string { return emptyString } +func (n noopSpan) SetTag(key string, value interface{}) Span { return n } +func (n noopSpan) LogFields(fields ...log.Field) {} +func (n noopSpan) LogKV(keyVals ...interface{}) {} +func (n noopSpan) Finish() {} +func (n noopSpan) FinishWithOptions(opts FinishOptions) {} +func (n noopSpan) SetOperationName(operationName string) Span { return n } +func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } +func (n noopSpan) LogEvent(event string) {} +func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n noopSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. +func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + return defaultNoopSpan +} + +// Inject belongs to the Tracer interface. +func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + return nil +} + +// Extract belongs to the Tracer interface. +func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + return nil, ErrSpanContextNotFound +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go new file mode 100644 index 000000000000..0dd466a373ef --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -0,0 +1,176 @@ +package opentracing + +import ( + "errors" + "net/http" +) + +/////////////////////////////////////////////////////////////////////////////// +// CORE PROPAGATION INTERFACES: +/////////////////////////////////////////////////////////////////////////////// + +var ( + // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or + // Tracer.Extract() is not recognized by the Tracer implementation. + ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") + + // ErrSpanContextNotFound occurs when the `carrier` passed to + // Tracer.Extract() is valid and uncorrupted but has insufficient + // information to extract a SpanContext. + ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") + + // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to + // operate on a SpanContext which it is not prepared to handle (for + // example, since it was created by a different tracer implementation). + ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") + + // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() + // implementations expect a different type of `carrier` than they are + // given. + ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") + + // ErrSpanContextCorrupted occurs when the `carrier` passed to + // Tracer.Extract() is of the expected type but is corrupted. 
+ ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") +) + +/////////////////////////////////////////////////////////////////////////////// +// BUILTIN PROPAGATION FORMATS: +/////////////////////////////////////////////////////////////////////////////// + +// BuiltinFormat is used to demarcate the values within package `opentracing` +// that are intended for use with the Tracer.Inject() and Tracer.Extract() +// methods. +type BuiltinFormat byte + +const ( + // Binary represents SpanContexts as opaque binary data. + // + // For Tracer.Inject(): the carrier must be an `io.Writer`. + // + // For Tracer.Extract(): the carrier must be an `io.Reader`. + Binary BuiltinFormat = iota + + // TextMap represents SpanContexts as key:value string pairs. + // + // Unlike HTTPHeaders, the TextMap format does not restrict the key or + // value character sets in any way. + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + TextMap + + // HTTPHeaders represents SpanContexts as HTTP header string pairs. + // + // Unlike TextMap, the HTTPHeaders format requires that the keys and values + // be valid as HTTP headers as-is (i.e., character casing may be unstable + // and special characters are disallowed in keys, values should be + // URL-escaped, etc). + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + // + // See HTTPHeadersCarrier for an implementation of both TextMapWriter + // and TextMapReader that defers to an http.Header instance for storage. + // For example, Inject(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := span.Tracer().Inject( + // span.Context(), opentracing.HTTPHeaders, carrier) + // + // Or Extract(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract( + // opentracing.HTTPHeaders, carrier) + // + HTTPHeaders +) + +// TextMapWriter is the Inject() carrier for the TextMap builtin format. With +// it, the caller can encode a SpanContext for propagation as entries in a map +// of unicode strings. +type TextMapWriter interface { + // Set a key:value pair to the carrier. Multiple calls to Set() for the + // same key leads to undefined behavior. + // + // NOTE: The backing store for the TextMapWriter may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + Set(key, val string) +} + +// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, +// the caller can decode a propagated SpanContext as entries in a map of +// unicode strings. +type TextMapReader interface { + // ForeachKey returns TextMap contents via repeated calls to the `handler` + // function. If any call to `handler` returns a non-nil error, ForeachKey + // terminates and returns that error. + // + // NOTE: The backing store for the TextMapReader may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. 
+ // + // The "foreach" callback pattern reduces unnecessary copying in some cases + // and also allows implementations to hold locks while the map is read. + ForeachKey(handler func(key, val string) error) error +} + +// TextMapCarrier allows the use of regular map[string]string +// as both TextMapWriter and TextMapReader. +type TextMapCarrier map[string]string + +// ForeachKey conforms to the TextMapReader interface. +func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { + for k, v := range c { + if err := handler(k, v); err != nil { + return err + } + } + return nil +} + +// Set implements Set() of opentracing.TextMapWriter +func (c TextMapCarrier) Set(key, val string) { + c[key] = val +} + +// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. +// +// Example usage for server side: +// +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) +// +// Example usage for client side: +// +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// err := tracer.Inject( +// span.Context(), +// opentracing.HTTPHeaders, +// carrier) +// +type HTTPHeadersCarrier http.Header + +// Set conforms to the TextMapWriter interface. +func (c HTTPHeadersCarrier) Set(key, val string) { + h := http.Header(c) + h.Add(key, val) +} + +// ForeachKey conforms to the TextMapReader interface. +func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { + for k, vals := range c { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go new file mode 100644 index 000000000000..0d3fb5341838 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/span.go @@ -0,0 +1,189 @@ +package opentracing + +import ( + "time" + + "github.com/opentracing/opentracing-go/log" +) + +// SpanContext represents Span state that must propagate to descendant Spans and across process +// boundaries (e.g., a tuple). +type SpanContext interface { + // ForeachBaggageItem grants access to all baggage items stored in the + // SpanContext. + // The handler function will be called for each baggage key/value pair. + // The ordering of items is not guaranteed. + // + // The bool return value indicates if the handler wants to continue iterating + // through the rest of the baggage items; for example if the handler is trying to + // find some baggage item by pattern matching the name, it can return false + // as soon as the item is found to stop further iterations. + ForeachBaggageItem(handler func(k, v string) bool) +} + +// Span represents an active, un-finished span in the OpenTracing system. +// +// Spans are created by the Tracer interface. +type Span interface { + // Sets the end timestamp and finalizes Span state. + // + // With the exception of calls to Context() (which are always allowed), + // Finish() must be the last call made to any span instance, and to do + // otherwise leads to undefined behavior. + Finish() + // FinishWithOptions is like Finish() but with explicit control over + // timestamps and log data. + FinishWithOptions(opts FinishOptions) + + // Context() yields the SpanContext for this Span. Note that the return + // value of Context() is still valid after a call to Span.Finish(), as is + // a call to Span.Context() after a call to Span.Finish(). 
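A self-contained sketch (not part of the vendored file) of the Inject/Extract round trip using the TextMapCarrier defined above. With the default NoopTracer it runs but Extract reports ErrSpanContextNotFound; a real tracer, such as the Jaeger client vendored later in this change, would recover the caller's context.

    package main

    import (
        "fmt"

        opentracing "github.com/opentracing/opentracing-go"
    )

    func main() {
        tracer := opentracing.GlobalTracer()

        clientSpan := tracer.StartSpan("client-op")
        defer clientSpan.Finish()

        // Client side: serialize the SpanContext into a plain string map.
        carrier := opentracing.TextMapCarrier{}
        if err := tracer.Inject(clientSpan.Context(), opentracing.TextMap, carrier); err != nil {
            fmt.Println("inject:", err)
        }

        // Server side: recover the caller's SpanContext from the same map
        // and continue the trace with a child span.
        spanCtx, err := tracer.Extract(opentracing.TextMap, carrier)
        if err != nil {
            // With the NoopTracer this is ErrSpanContextNotFound.
            fmt.Println("extract:", err)
            return
        }
        serverSpan := tracer.StartSpan("server-op", opentracing.ChildOf(spanCtx))
        serverSpan.Finish()
    }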
+ Context() SpanContext + + // Sets or changes the operation name. + // + // Returns a reference to this Span for chaining. + SetOperationName(operationName string) Span + + // Adds a tag to the span. + // + // If there is a pre-existing tag set for `key`, it is overwritten. + // + // Tag values can be numeric types, strings, or bools. The behavior of + // other tag value types is undefined at the OpenTracing level. If a + // tracing system does not know how to handle a particular value type, it + // may ignore the tag, but shall not panic. + // + // Returns a reference to this Span for chaining. + SetTag(key string, value interface{}) Span + + // LogFields is an efficient and type-checked way to record key:value + // logging data about a Span, though the programming interface is a little + // more verbose than LogKV(). Here's an example: + // + // span.LogFields( + // log.String("event", "soft error"), + // log.String("type", "cache timeout"), + // log.Int("waited.millis", 1500)) + // + // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. + LogFields(fields ...log.Field) + + // LogKV is a concise, readable way to record key:value logging data about + // a Span, though unfortunately this also makes it less efficient and less + // type-safe than LogFields(). Here's an example: + // + // span.LogKV( + // "event", "soft error", + // "type", "cache timeout", + // "waited.millis", 1500) + // + // For LogKV (as opposed to LogFields()), the parameters must appear as + // key-value pairs, like + // + // span.LogKV(key1, val1, key2, val2, key3, val3, ...) + // + // The keys must all be strings. The values may be strings, numeric types, + // bools, Go error instances, or arbitrary structs. + // + // (Note to implementors: consider the log.InterleavedKVToFields() helper) + LogKV(alternatingKeyValues ...interface{}) + + // SetBaggageItem sets a key:value pair on this Span and its SpanContext + // that also propagates to descendants of this Span. + // + // SetBaggageItem() enables powerful functionality given a full-stack + // opentracing integration (e.g., arbitrary application data from a mobile + // app can make it, transparently, all the way into the depths of a storage + // system), and with it some powerful costs: use this feature with care. + // + // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to + // *future* causal descendants of the associated Span. + // + // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and + // value is copied into every local *and remote* child of the associated + // Span, and that can add up to a lot of network and cpu overhead. + // + // Returns a reference to this Span for chaining. + SetBaggageItem(restrictedKey, value string) Span + + // Gets the value for a baggage item given its key. Returns the empty string + // if the value isn't found in this Span. + BaggageItem(restrictedKey string) string + + // Provides access to the Tracer that created this Span. + Tracer() Tracer + + // Deprecated: use LogFields or LogKV + LogEvent(event string) + // Deprecated: use LogFields or LogKV + LogEventWithPayload(event string, payload interface{}) + // Deprecated: use LogFields or LogKV + Log(data LogData) +} + +// LogRecord is data associated with a single Span log. Every LogRecord +// instance must specify at least one Field. 
+type LogRecord struct { + Timestamp time.Time + Fields []log.Field +} + +// FinishOptions allows Span.FinishWithOptions callers to override the finish +// timestamp and provide log data via a bulk interface. +type FinishOptions struct { + // FinishTime overrides the Span's finish time, or implicitly becomes + // time.Now() if FinishTime.IsZero(). + // + // FinishTime must resolve to a timestamp that's >= the Span's StartTime + // (per StartSpanOptions). + FinishTime time.Time + + // LogRecords allows the caller to specify the contents of many LogFields() + // calls with a single slice. May be nil. + // + // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must + // be set explicitly). Also, they must be >= the Span's start timestamp and + // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the + // behavior of FinishWithOptions() is undefined. + // + // If specified, the caller hands off ownership of LogRecords at + // FinishWithOptions() invocation time. + // + // If specified, the (deprecated) BulkLogData must be nil or empty. + LogRecords []LogRecord + + // BulkLogData is DEPRECATED. + BulkLogData []LogData +} + +// LogData is DEPRECATED +type LogData struct { + Timestamp time.Time + Event string + Payload interface{} +} + +// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord +func (ld *LogData) ToLogRecord() LogRecord { + var literalTimestamp time.Time + if ld.Timestamp.IsZero() { + literalTimestamp = time.Now() + } else { + literalTimestamp = ld.Timestamp + } + rval := LogRecord{ + Timestamp: literalTimestamp, + } + if ld.Payload == nil { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + } + } else { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + log.Object("payload", ld.Payload), + } + } + return rval +} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go new file mode 100644 index 000000000000..7bca1f73676d --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -0,0 +1,305 @@ +package opentracing + +import "time" + +// Tracer is a simple, thin interface for Span creation and SpanContext +// propagation. +type Tracer interface { + + // Create, start, and return a new Span with the given `operationName` and + // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows + // from the "functional options" pattern, per + // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) + // + // A Span with no SpanReference options (e.g., opentracing.ChildOf() or + // opentracing.FollowsFrom()) becomes the root of its own trace. + // + // Examples: + // + // var tracer opentracing.Tracer = ... + // + // // The root-span case: + // sp := tracer.StartSpan("GetFeed") + // + // // The vanilla child span case: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context())) + // + // // All the bells and whistles: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context()), + // opentracing.Tag{"user_agent", loggedReq.UserAgent}, + // opentracing.StartTime(loggedReq.Timestamp), + // ) + // + StartSpan(operationName string, opts ...StartSpanOption) Span + + // Inject() takes the `sm` SpanContext instance and injects it for + // propagation within `carrier`. The actual type of `carrier` depends on + // the value of `format`. 
+ // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see + // https://godoc.org/golang.org/x/net/context#WithValue). + // + // Example usage (sans error handling): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := tracer.Inject( + // span.Context(), + // opentracing.HTTPHeaders, + // carrier) + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Implementations may return opentracing.ErrUnsupportedFormat if `format` + // is not supported by (or not known by) the implementation. + // + // Implementations may return opentracing.ErrInvalidCarrier or any other + // implementation-specific error if the format is supported but injection + // fails anyway. + // + // See Tracer.Extract(). + Inject(sm SpanContext, format interface{}, carrier interface{}) error + + // Extract() returns a SpanContext instance given `format` and `carrier`. + // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see + // https://godoc.org/golang.org/x/net/context#WithValue). + // + // Example usage (with StartSpan): + // + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) + // + // // ... assuming the ultimate goal here is to resume the trace with a + // // server-side Span: + // var serverSpan opentracing.Span + // if err == nil { + // span = tracer.StartSpan( + // rpcMethodName, ext.RPCServerOption(clientContext)) + // } else { + // span = tracer.StartSpan(rpcMethodName) + // } + // + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Return values: + // - A successful Extract returns a SpanContext instance and a nil error + // - If there was simply no SpanContext to extract in `carrier`, Extract() + // returns (nil, opentracing.ErrSpanContextNotFound) + // - If `format` is unsupported or unrecognized, Extract() returns (nil, + // opentracing.ErrUnsupportedFormat) + // - If there are more fundamental problems with the `carrier` object, + // Extract() may return opentracing.ErrInvalidCarrier, + // opentracing.ErrSpanContextCorrupted, or implementation-specific + // errors. + // + // See Tracer.Inject(). + Extract(format interface{}, carrier interface{}) (SpanContext, error) +} + +// StartSpanOptions allows Tracer.StartSpan() callers and implementors a +// mechanism to override the start timestamp, specify Span References, and make +// a single Tag or multiple Tags available at Span start time. +// +// StartSpan() callers should look at the StartSpanOption interface and +// implementations available in this package. +// +// Tracer implementations can convert a slice of `StartSpanOption` instances +// into a `StartSpanOptions` struct like so: +// +// func StartSpan(opName string, opts ...opentracing.StartSpanOption) { +// sso := opentracing.StartSpanOptions{} +// for _, o := range opts { +// o.Apply(&sso) +// } +// ... +// } +// +type StartSpanOptions struct { + // Zero or more causal references to other Spans (via their SpanContext). + // If empty, start a "root" Span (i.e., start a new trace). 
+ References []SpanReference + + // StartTime overrides the Span's start time, or implicitly becomes + // time.Now() if StartTime.IsZero(). + StartTime time.Time + + // Tags may have zero or more entries; the restrictions on map values are + // identical to those for Span.SetTag(). May be nil. + // + // If specified, the caller hands off ownership of Tags at + // StartSpan() invocation time. + Tags map[string]interface{} +} + +// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan. +// +// StartSpanOption borrows from the "functional options" pattern, per +// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis +type StartSpanOption interface { + Apply(*StartSpanOptions) +} + +// SpanReferenceType is an enum type describing different categories of +// relationships between two Spans. If Span-2 refers to Span-1, the +// SpanReferenceType describes Span-1 from Span-2's perspective. For example, +// ChildOfRef means that Span-1 created Span-2. +// +// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for +// completion; e.g., Span-2 may be part of a background job enqueued by Span-1, +// or Span-2 may be sitting in a distributed queue behind Span-1. +type SpanReferenceType int + +const ( + // ChildOfRef refers to a parent Span that caused *and* somehow depends + // upon the new child Span. Often (but not always), the parent Span cannot + // finish until the child Span does. + // + // An timing diagram for a ChildOfRef that's blocked on the new Span: + // + // [-Parent Span---------] + // [-Child Span----] + // + // See http://opentracing.io/spec/ + // + // See opentracing.ChildOf() + ChildOfRef SpanReferenceType = iota + + // FollowsFromRef refers to a parent Span that does not depend in any way + // on the result of the new child Span. For instance, one might use + // FollowsFromRefs to describe pipeline stages separated by queues, + // or a fire-and-forget cache insert at the tail end of a web request. + // + // A FollowsFromRef Span is part of the same logical trace as the new Span: + // i.e., the new Span is somehow caused by the work of its FollowsFromRef. + // + // All of the following could be valid timing diagrams for children that + // "FollowFrom" a parent. + // + // [-Parent Span-] [-Child Span-] + // + // + // [-Parent Span--] + // [-Child Span-] + // + // + // [-Parent Span-] + // [-Child Span-] + // + // See http://opentracing.io/spec/ + // + // See opentracing.FollowsFrom() + FollowsFromRef +) + +// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a +// referenced SpanContext. See the SpanReferenceType documentation for +// supported relationships. If SpanReference is created with +// ReferencedContext==nil, it has no effect. Thus it allows for a more concise +// syntax for starting spans: +// +// sc, _ := tracer.Extract(someFormat, someCarrier) +// span := tracer.StartSpan("operation", opentracing.ChildOf(sc)) +// +// The `ChildOf(sc)` option above will not panic if sc == nil, it will just +// not add the parent span reference to the options. +type SpanReference struct { + Type SpanReferenceType + ReferencedContext SpanContext +} + +// Apply satisfies the StartSpanOption interface. +func (r SpanReference) Apply(o *StartSpanOptions) { + if r.ReferencedContext != nil { + o.References = append(o.References, r) + } +} + +// ChildOf returns a StartSpanOption pointing to a dependent parent span. +// If sc == nil, the option has no effect. 
+// +// See ChildOfRef, SpanReference +func ChildOf(sc SpanContext) SpanReference { + return SpanReference{ + Type: ChildOfRef, + ReferencedContext: sc, + } +} + +// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused +// the child Span but does not directly depend on its result in any way. +// If sc == nil, the option has no effect. +// +// See FollowsFromRef, SpanReference +func FollowsFrom(sc SpanContext) SpanReference { + return SpanReference{ + Type: FollowsFromRef, + ReferencedContext: sc, + } +} + +// StartTime is a StartSpanOption that sets an explicit start timestamp for the +// new Span. +type StartTime time.Time + +// Apply satisfies the StartSpanOption interface. +func (t StartTime) Apply(o *StartSpanOptions) { + o.StartTime = time.Time(t) +} + +// Tags are a generic map from an arbitrary string key to an opaque value type. +// The underlying tracing system is responsible for interpreting and +// serializing the values. +type Tags map[string]interface{} + +// Apply satisfies the StartSpanOption interface. +func (t Tags) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + for k, v := range t { + o.Tags[k] = v + } +} + +// Tag may be passed as a StartSpanOption to add a tag to new spans, +// or its Set method may be used to apply the tag to an existing Span, +// for example: +// +// tracer.StartSpan("opName", Tag{"Key", value}) +// +// or +// +// Tag{"key", value}.Set(span) +type Tag struct { + Key string + Value interface{} +} + +// Apply satisfies the StartSpanOption interface. +func (t Tag) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + o.Tags[t.Key] = t.Value +} + +// Set applies the tag to an existing Span. +func (t Tag) Set(s Span) { + s.SetTag(t.Key, t.Value) +} diff --git a/vendor/github.com/rjeczalik/notify/appveyor.yml b/vendor/github.com/rjeczalik/notify/appveyor.yml index a495bd7c7300..a0bdc37a389a 100644 --- a/vendor/github.com/rjeczalik/notify/appveyor.yml +++ b/vendor/github.com/rjeczalik/notify/appveyor.yml @@ -7,16 +7,20 @@ clone_folder: c:\projects\src\github.com\rjeczalik\notify environment: PATH: c:\projects\bin;%PATH% GOPATH: c:\projects - NOTIFY_TIMEOUT: 5s + NOTIFY_TIMEOUT: 10s + GOVERSION: 1.10.3 install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip + - 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL + + - cd %APPVEYOR_BUILD_FOLDER% - go version - - go get -v -t ./... build_script: - - go tool vet -all . - go build ./... - - go test -v -timeout 60s -race ./... + - go test -v -timeout 120s -race ./... test: off diff --git a/vendor/github.com/rjeczalik/notify/debug_debug.go b/vendor/github.com/rjeczalik/notify/debug_debug.go index 6fca891ab305..9d234cedda4f 100644 --- a/vendor/github.com/rjeczalik/notify/debug_debug.go +++ b/vendor/github.com/rjeczalik/notify/debug_debug.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014-2015 The Notify Authors. All rights reserved. +// Copyright (c) 2014-2018 The Notify Authors. All rights reserved. // Use of this source code is governed by the MIT license that can be // found in the LICENSE file. 
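Purely illustrative, not part of the vendored files: a small sketch of the functional-options pattern that tracer.go documents above, folding StartTime, Tag and Tags values into a StartSpanOptions struct the way a Tracer.StartSpan implementation is expected to; collectOptions is an assumed helper name.

    package main

    import (
        "fmt"
        "time"

        opentracing "github.com/opentracing/opentracing-go"
    )

    // collectOptions applies each StartSpanOption to a single
    // StartSpanOptions struct, per the pattern shown in the tracer.go docs.
    func collectOptions(opts ...opentracing.StartSpanOption) opentracing.StartSpanOptions {
        sso := opentracing.StartSpanOptions{}
        for _, o := range opts {
            o.Apply(&sso)
        }
        return sso
    }

    func main() {
        sso := collectOptions(
            opentracing.StartTime(time.Now()),
            opentracing.Tag{Key: "component", Value: "example"},
            opentracing.Tags{"retry": true},
        )
        fmt.Println(len(sso.References), len(sso.Tags), sso.StartTime.IsZero()) // 0 2 false
    }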
@@ -6,4 +6,4 @@ package notify -var debugTag bool = true +var debugTag = true diff --git a/vendor/github.com/rjeczalik/notify/debug_nodebug.go b/vendor/github.com/rjeczalik/notify/debug_nodebug.go index be391a2769d9..9ebf880d880b 100644 --- a/vendor/github.com/rjeczalik/notify/debug_nodebug.go +++ b/vendor/github.com/rjeczalik/notify/debug_nodebug.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014-2015 The Notify Authors. All rights reserved. +// Copyright (c) 2014-2018 The Notify Authors. All rights reserved. // Use of this source code is governed by the MIT license that can be // found in the LICENSE file. @@ -6,4 +6,4 @@ package notify -var debugTag bool = false +var debugTag = false diff --git a/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go b/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go index a2b332a2e05d..95ee704444c9 100644 --- a/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go +++ b/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go @@ -48,7 +48,7 @@ var wg sync.WaitGroup // used to wait until the runloop starts // started and is ready via the wg. It also serves purpose of a dummy source, // thanks to it the runloop does not return as it also has at least one source // registered. -var source = C.CFRunLoopSourceCreate(nil, 0, &C.CFRunLoopSourceContext{ +var source = C.CFRunLoopSourceCreate(refZero, 0, &C.CFRunLoopSourceContext{ perform: (C.CFRunLoopPerformCallBack)(C.gosource), }) @@ -90,6 +90,10 @@ func gostream(_, info uintptr, n C.size_t, paths, flags, ids uintptr) { if n == 0 { return } + fn := streamFuncs.get(info) + if fn == nil { + return + } ev := make([]FSEvent, 0, int(n)) for i := uintptr(0); i < uintptr(n); i++ { switch flags := *(*uint32)(unsafe.Pointer((flags + i*offflag))); { @@ -104,7 +108,7 @@ func gostream(_, info uintptr, n C.size_t, paths, flags, ids uintptr) { } } - streamFuncs.get(info)(ev) + fn(ev) } // StreamFunc is a callback called when stream receives file events. @@ -162,8 +166,8 @@ func (s *stream) Start() error { return nil } wg.Wait() - p := C.CFStringCreateWithCStringNoCopy(nil, C.CString(s.path), C.kCFStringEncodingUTF8, nil) - path := C.CFArrayCreate(nil, (*unsafe.Pointer)(unsafe.Pointer(&p)), 1, nil) + p := C.CFStringCreateWithCStringNoCopy(refZero, C.CString(s.path), C.kCFStringEncodingUTF8, refZero) + path := C.CFArrayCreate(refZero, (*unsafe.Pointer)(unsafe.Pointer(&p)), 1, nil) ctx := C.FSEventStreamContext{} ref := C.EventStreamCreate(&ctx, C.uintptr_t(s.info), path, C.FSEventStreamEventId(atomic.LoadUint64(&since)), latency, flags) if ref == nilstream { diff --git a/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.10.go b/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.10.go new file mode 100644 index 000000000000..dd2433d20a7b --- /dev/null +++ b/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.10.go @@ -0,0 +1,14 @@ +// Copyright (c) 2018 The Notify Authors. All rights reserved. +// Use of this source code is governed by the MIT license that can be +// found in the LICENSE file. 
+ +// +build darwin,!kqueue,cgo,!go1.11 + +package notify + +/* + #include +*/ +import "C" + +var refZero = (*C.struct___CFAllocator)(nil) diff --git a/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.11.go b/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.11.go new file mode 100644 index 000000000000..92b406ce7ca5 --- /dev/null +++ b/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.11.go @@ -0,0 +1,9 @@ +// Copyright (c) 2018 The Notify Authors. All rights reserved. +// Use of this source code is governed by the MIT license that can be +// found in the LICENSE file. + +// +build darwin,!kqueue,go1.11 + +package notify + +const refZero = 0 diff --git a/vendor/github.com/rjeczalik/notify/watcher_notimplemented.go b/vendor/github.com/rjeczalik/notify/watcher_notimplemented.go new file mode 100644 index 000000000000..bb0672fd88bc --- /dev/null +++ b/vendor/github.com/rjeczalik/notify/watcher_notimplemented.go @@ -0,0 +1,15 @@ +// Copyright (c) 2014-2018 The Notify Authors. All rights reserved. +// Use of this source code is governed by the MIT license that can be +// found in the LICENSE file. + +// +build !darwin,!linux,!freebsd,!dragonfly,!netbsd,!openbsd,!windows +// +build !kqueue,!solaris + +package notify + +import "errors" + +// newWatcher stub. +func newWatcher(chan<- EventInfo) watcher { + return watcherStub{errors.New("notify: not implemented")} +} diff --git a/vendor/github.com/rjeczalik/notify/watcher_readdcw.go b/vendor/github.com/rjeczalik/notify/watcher_readdcw.go index 1494fcd79993..b69811a690f9 100644 --- a/vendor/github.com/rjeczalik/notify/watcher_readdcw.go +++ b/vendor/github.com/rjeczalik/notify/watcher_readdcw.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014-2015 The Notify Authors. All rights reserved. +// Copyright (c) 2014-2018 The Notify Authors. All rights reserved. // Use of this source code is governed by the MIT license that can be // found in the LICENSE file. @@ -22,7 +22,7 @@ import ( const readBufferSize = 4096 // Since all operations which go through the Windows completion routine are done -// asynchronously, filter may set one of the constants belor. They were defined +// asynchronously, filter may set one of the constants below. They were defined // in order to distinguish whether current folder should be re-registered in // ReadDirectoryChangesW function or some control operations need to be executed. const ( @@ -109,8 +109,13 @@ func (g *grip) register(cph syscall.Handle) (err error) { // buffer. Directory changes that occur between calls to this function are added // to the buffer and then, returned with the next call. func (g *grip) readDirChanges() error { + handle := syscall.Handle(atomic.LoadUintptr((*uintptr)(&g.handle))) + if handle == syscall.InvalidHandle { + return nil // Handle was closed. + } + return syscall.ReadDirectoryChanges( - g.handle, + handle, &g.buffer[0], uint32(unsafe.Sizeof(g.buffer)), g.recursive, @@ -220,12 +225,27 @@ func (wd *watched) updateGrip(idx int, cph syscall.Handle, reset bool, // returned from the operating system kernel. 
func (wd *watched) closeHandle() (err error) { for _, g := range wd.digrip { - if g != nil && g.handle != syscall.InvalidHandle { - switch suberr := syscall.CloseHandle(g.handle); { - case suberr == nil: - g.handle = syscall.InvalidHandle - case err == nil: - err = suberr + if g == nil { + continue + } + + for { + handle := syscall.Handle(atomic.LoadUintptr((*uintptr)(&g.handle))) + if handle == syscall.InvalidHandle { + break // Already closed. + } + + e := syscall.CloseHandle(handle) + if e != nil && err == nil { + err = e + } + + // Set invalid handle even when CloseHandle fails. This will leak + // the handle but, since we can't close it anyway, there won't be + // any difference. + if atomic.CompareAndSwapUintptr((*uintptr)(&g.handle), + (uintptr)(handle), (uintptr)(syscall.InvalidHandle)) { + break } } } @@ -272,50 +292,49 @@ func (r *readdcw) RecursiveWatch(path string, event Event) error { // watch inserts a directory to the group of watched folders. If watched folder // already exists, function tries to rewatch it with new filters(NOT VALID). Moreover, // watch starts the main event loop goroutine when called for the first time. -func (r *readdcw) watch(path string, event Event, recursive bool) (err error) { +func (r *readdcw) watch(path string, event Event, recursive bool) error { if event&^(All|fileNotifyChangeAll) != 0 { return errors.New("notify: unknown event") } + r.Lock() - wd, ok := r.m[path] - r.Unlock() - if !ok { - if err = r.lazyinit(); err != nil { - return - } - r.Lock() - defer r.Unlock() - if wd, ok = r.m[path]; ok { - dbgprint("watch: exists already") - return - } - if wd, err = newWatched(r.cph, uint32(event), recursive, path); err != nil { - return - } - r.m[path] = wd - dbgprint("watch: new watch added") - } else { - dbgprint("watch: exists already") + defer r.Unlock() + + if wd, ok := r.m[path]; ok { + dbgprint("watch: already exists") + wd.filter &^= stateUnwatch + return nil } + + if err := r.lazyinit(); err != nil { + return err + } + + wd, err := newWatched(r.cph, uint32(event), recursive, path) + if err != nil { + return err + } + + r.m[path] = wd + dbgprint("watch: new watch added") + return nil } -// lazyinit creates an I/O completion port and starts the main event processing -// loop. This method uses Double-Checked Locking optimization. +// lazyinit creates an I/O completion port and starts the main event loop. 
func (r *readdcw) lazyinit() (err error) { invalid := uintptr(syscall.InvalidHandle) + if atomic.LoadUintptr((*uintptr)(&r.cph)) == invalid { - r.Lock() - defer r.Unlock() - if atomic.LoadUintptr((*uintptr)(&r.cph)) == invalid { - cph := syscall.InvalidHandle - if cph, err = syscall.CreateIoCompletionPort(cph, 0, 0, 0); err != nil { - return - } - r.cph, r.start = cph, true - go r.loop() + cph := syscall.InvalidHandle + if cph, err = syscall.CreateIoCompletionPort(cph, 0, 0, 0); err != nil { + return } + + r.cph, r.start = cph, true + go r.loop() } + return } @@ -364,6 +383,7 @@ func (r *readdcw) loopstate(overEx *overlappedEx) { overEx.parent.parent.recreate(r.cph) case stateUnwatch: dbgprint("loopstate unwatch") + overEx.parent.parent.closeHandle() delete(r.m, syscall.UTF16ToString(overEx.parent.pathw)) case stateCPClose: default: @@ -495,27 +515,30 @@ func (r *readdcw) RecursiveUnwatch(path string) error { // TODO : pknap func (r *readdcw) unwatch(path string) (err error) { var wd *watched + r.Lock() defer r.Unlock() if wd, err = r.nonStateWatchedLocked(path); err != nil { return } + wd.filter |= stateUnwatch - if err = wd.closeHandle(); err != nil { - wd.filter &^= stateUnwatch - return - } + dbgprint("unwatch: set unwatch state") + if _, attrErr := syscall.GetFileAttributes(&wd.pathw[0]); attrErr != nil { for _, g := range wd.digrip { - if g != nil { - dbgprint("unwatch: posting") - if err = syscall.PostQueuedCompletionStatus(r.cph, 0, 0, (*syscall.Overlapped)(unsafe.Pointer(g.ovlapped))); err != nil { - wd.filter &^= stateUnwatch - return - } + if g == nil { + continue + } + + dbgprint("unwatch: posting") + if err = syscall.PostQueuedCompletionStatus(r.cph, 0, 0, (*syscall.Overlapped)(unsafe.Pointer(g.ovlapped))); err != nil { + wd.filter &^= stateUnwatch + return } } } + return } diff --git a/vendor/github.com/rjeczalik/notify/watcher_stub.go b/vendor/github.com/rjeczalik/notify/watcher_stub.go index 68b9c135b0c8..9b284ddc85f3 100644 --- a/vendor/github.com/rjeczalik/notify/watcher_stub.go +++ b/vendor/github.com/rjeczalik/notify/watcher_stub.go @@ -1,23 +1,13 @@ -// Copyright (c) 2014-2015 The Notify Authors. All rights reserved. +// Copyright (c) 2014-2018 The Notify Authors. All rights reserved. // Use of this source code is governed by the MIT license that can be // found in the LICENSE file. -// +build !darwin,!linux,!freebsd,!dragonfly,!netbsd,!openbsd,!windows -// +build !kqueue,!solaris - package notify -import "errors" - -type stub struct{ error } - -// newWatcher stub. -func newWatcher(chan<- EventInfo) watcher { - return stub{errors.New("notify: not implemented")} -} +type watcherStub struct{ error } // Following methods implement notify.watcher interface. 
-func (s stub) Watch(string, Event) error { return s } -func (s stub) Rewatch(string, Event, Event) error { return s } -func (s stub) Unwatch(string) (err error) { return s } -func (s stub) Close() error { return s } +func (s watcherStub) Watch(string, Event) error { return s } +func (s watcherStub) Rewatch(string, Event, Event) error { return s } +func (s watcherStub) Unwatch(string) (err error) { return s } +func (s watcherStub) Close() error { return s } diff --git a/vendor/github.com/rjeczalik/notify/watcher_trigger.go b/vendor/github.com/rjeczalik/notify/watcher_trigger.go index 78151f909bc9..1ebe04829ee6 100644 --- a/vendor/github.com/rjeczalik/notify/watcher_trigger.go +++ b/vendor/github.com/rjeczalik/notify/watcher_trigger.go @@ -106,7 +106,8 @@ func newWatcher(c chan<- EventInfo) watcher { } t.t = newTrigger(t.pthLkp) if err := t.t.Init(); err != nil { - panic(err) + t.Close() + return watcherStub{fmt.Errorf("failed setting up watcher: %v", err)} } go t.monitor() return t diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go index b6563e87e475..28e50906adb6 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go @@ -640,6 +640,16 @@ func (db *DB) tableNeedCompaction() bool { return v.needCompaction() } +// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted. +func (db *DB) resumeWrite() bool { + v := db.s.version() + defer v.release() + if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() { + return true + } + return false +} + func (db *DB) pauseCompaction(ch chan<- struct{}) { select { case ch <- struct{}{}: @@ -653,6 +663,7 @@ type cCmd interface { } type cAuto struct { + // Note for table compaction, an empty ackC represents it's a compaction waiting command. ackC chan<- error } @@ -765,8 +776,10 @@ func (db *DB) mCompaction() { } func (db *DB) tCompaction() { - var x cCmd - var ackQ []cCmd + var ( + x cCmd + ackQ, waitQ []cCmd + ) defer func() { if x := recover(); x != nil { @@ -778,6 +791,10 @@ func (db *DB) tCompaction() { ackQ[i].ack(ErrClosed) ackQ[i] = nil } + for i := range waitQ { + waitQ[i].ack(ErrClosed) + waitQ[i] = nil + } if x != nil { x.ack(ErrClosed) } @@ -795,12 +812,25 @@ func (db *DB) tCompaction() { return default: } + // Resume write operation as soon as possible. 
+ if len(waitQ) > 0 && db.resumeWrite() { + for i := range waitQ { + waitQ[i].ack(nil) + waitQ[i] = nil + } + waitQ = waitQ[:0] + } } else { for i := range ackQ { ackQ[i].ack(nil) ackQ[i] = nil } ackQ = ackQ[:0] + for i := range waitQ { + waitQ[i].ack(nil) + waitQ[i] = nil + } + waitQ = waitQ[:0] select { case x = <-db.tcompCmdC: case ch := <-db.tcompPauseC: @@ -813,7 +843,11 @@ func (db *DB) tCompaction() { if x != nil { switch cmd := x.(type) { case cAuto: - ackQ = append(ackQ, x) + if cmd.ackC != nil { + waitQ = append(waitQ, x) + } else { + ackQ = append(ackQ, x) + } case cRange: x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)) default: diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go index 1189decac612..9ba71fd6d107 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go @@ -9,10 +9,12 @@ package storage import ( "errors" "fmt" + "io" "io/ioutil" "os" "path/filepath" "runtime" + "sort" "strconv" "strings" "sync" @@ -42,6 +44,30 @@ func (lock *fileStorageLock) Unlock() { } } +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func writeFileSynced(filename string, data []byte, perm os.FileMode) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Sync(); err == nil { + err = err1 + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + const logSizeThreshold = 1024 * 1024 // 1 MiB // fileStorage is a file-system backed storage. @@ -60,7 +86,7 @@ type fileStorage struct { day int } -// OpenFile returns a new filesytem-backed storage implementation with the given +// OpenFile returns a new filesystem-backed storage implementation with the given // path. This also acquire a file lock, so any subsequent attempt to open the // same path will fail. // @@ -189,7 +215,8 @@ func (fs *fileStorage) doLog(t time.Time, str string) { // write fs.buf = append(fs.buf, []byte(str)...) fs.buf = append(fs.buf, '\n') - fs.logw.Write(fs.buf) + n, _ := fs.logw.Write(fs.buf) + fs.logSize += int64(n) } func (fs *fileStorage) Log(str string) { @@ -210,7 +237,46 @@ func (fs *fileStorage) log(str string) { } } -func (fs *fileStorage) SetMeta(fd FileDesc) (err error) { +func (fs *fileStorage) setMeta(fd FileDesc) error { + content := fsGenName(fd) + "\n" + // Check and backup old CURRENT file. + currentPath := filepath.Join(fs.path, "CURRENT") + if _, err := os.Stat(currentPath); err == nil { + b, err := ioutil.ReadFile(currentPath) + if err != nil { + fs.log(fmt.Sprintf("backup CURRENT: %v", err)) + return err + } + if string(b) == content { + // Content not changed, do nothing. + return nil + } + if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil { + fs.log(fmt.Sprintf("backup CURRENT: %v", err)) + return err + } + } else if !os.IsNotExist(err) { + return err + } + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) + if err := writeFileSynced(path, []byte(content), 0644); err != nil { + fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err)) + return err + } + // Replace CURRENT file. 
+ if err := rename(path, currentPath); err != nil { + fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err)) + return err + } + // Sync root directory. + if err := syncDir(fs.path); err != nil { + fs.log(fmt.Sprintf("syncDir: %v", err)) + return err + } + return nil +} + +func (fs *fileStorage) SetMeta(fd FileDesc) error { if !FileDescOk(fd) { return ErrInvalidFile } @@ -223,44 +289,10 @@ func (fs *fileStorage) SetMeta(fd FileDesc) (err error) { if fs.open < 0 { return ErrClosed } - defer func() { - if err != nil { - fs.log(fmt.Sprintf("CURRENT: %v", err)) - } - }() - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) - w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return - } - _, err = fmt.Fprintln(w, fsGenName(fd)) - if err != nil { - fs.log(fmt.Sprintf("write CURRENT.%d: %v", fd.Num, err)) - return - } - if err = w.Sync(); err != nil { - fs.log(fmt.Sprintf("flush CURRENT.%d: %v", fd.Num, err)) - return - } - if err = w.Close(); err != nil { - fs.log(fmt.Sprintf("close CURRENT.%d: %v", fd.Num, err)) - return - } - if err != nil { - return - } - if err = rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err)) - return - } - // Sync root directory. - if err = syncDir(fs.path); err != nil { - fs.log(fmt.Sprintf("syncDir: %v", err)) - } - return + return fs.setMeta(fd) } -func (fs *fileStorage) GetMeta() (fd FileDesc, err error) { +func (fs *fileStorage) GetMeta() (FileDesc, error) { fs.mu.Lock() defer fs.mu.Unlock() if fs.open < 0 { @@ -268,7 +300,7 @@ func (fs *fileStorage) GetMeta() (fd FileDesc, err error) { } dir, err := os.Open(fs.path) if err != nil { - return + return FileDesc{}, err } names, err := dir.Readdirnames(0) // Close the dir first before checking for Readdirnames error. @@ -276,94 +308,134 @@ func (fs *fileStorage) GetMeta() (fd FileDesc, err error) { fs.log(fmt.Sprintf("close dir: %v", ce)) } if err != nil { - return - } - // Find latest CURRENT file. - var rem []string - var pend bool - var cerr error - for _, name := range names { - if strings.HasPrefix(name, "CURRENT") { - pend1 := len(name) > 7 - var pendNum int64 - // Make sure it is valid name for a CURRENT file, otherwise skip it. - if pend1 { - if name[7] != '.' || len(name) < 9 { - fs.log(fmt.Sprintf("skipping %s: invalid file name", name)) - continue - } - var e1 error - if pendNum, e1 = strconv.ParseInt(name[8:], 10, 0); e1 != nil { - fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", name, e1)) - continue - } + return FileDesc{}, err + } + // Try this in order: + // - CURRENT.[0-9]+ ('pending rename' file, descending order) + // - CURRENT + // - CURRENT.bak + // + // Skip corrupted file or file that point to a missing target file. 
+ type currentFile struct { + name string + fd FileDesc + } + tryCurrent := func(name string) (*currentFile, error) { + b, err := ioutil.ReadFile(filepath.Join(fs.path, name)) + if err != nil { + if os.IsNotExist(err) { + err = os.ErrNotExist } - path := filepath.Join(fs.path, name) - r, e1 := os.OpenFile(path, os.O_RDONLY, 0) - if e1 != nil { - return FileDesc{}, e1 + return nil, err + } + var fd FileDesc + if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) { + fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b)) + err := &ErrCorrupted{ + Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"), } - b, e1 := ioutil.ReadAll(r) - if e1 != nil { - r.Close() - return FileDesc{}, e1 + return nil, err + } + if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil { + if os.IsNotExist(err) { + fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd)) + err = os.ErrNotExist } - var fd1 FileDesc - if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd1) { - fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", name)) - if pend1 { - rem = append(rem, name) - } - if !pend1 || cerr == nil { - metaFd, _ := fsParseName(name) - cerr = &ErrCorrupted{ - Fd: metaFd, - Err: errors.New("leveldb/storage: corrupted or incomplete meta file"), - } - } - } else if pend1 && pendNum != fd1.Num { - fs.log(fmt.Sprintf("skipping %s: inconsistent pending-file num: %d vs %d", name, pendNum, fd1.Num)) - rem = append(rem, name) - } else if fd1.Num < fd.Num { - fs.log(fmt.Sprintf("skipping %s: obsolete", name)) - if pend1 { - rem = append(rem, name) - } + return nil, err + } + return ¤tFile{name: name, fd: fd}, nil + } + tryCurrents := func(names []string) (*currentFile, error) { + var ( + cur *currentFile + // Last corruption error. + lastCerr error + ) + for _, name := range names { + var err error + cur, err = tryCurrent(name) + if err == nil { + break + } else if err == os.ErrNotExist { + // Fallback to the next file. + } else if isCorrupted(err) { + lastCerr = err + // Fallback to the next file. } else { - fd = fd1 - pend = pend1 + // In case the error is due to permission, etc. + return nil, err } - if err := r.Close(); err != nil { - fs.log(fmt.Sprintf("close %s: %v", name, err)) + } + if cur == nil { + err := os.ErrNotExist + if lastCerr != nil { + err = lastCerr } + return nil, err } + return cur, nil } - // Don't remove any files if there is no valid CURRENT file. - if fd.Zero() { - if cerr != nil { - err = cerr - } else { - err = os.ErrNotExist + + // Try 'pending rename' files. + var nums []int64 + for _, name := range names { + if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" { + i, err := strconv.ParseInt(name[8:], 10, 64) + if err == nil { + nums = append(nums, i) + } } - return } - if !fs.readOnly { - // Rename pending CURRENT file to an effective CURRENT. - if pend { - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) - if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", fd.Num, err)) - } + var ( + pendCur *currentFile + pendErr = os.ErrNotExist + pendNames []string + ) + if len(nums) > 0 { + sort.Sort(sort.Reverse(int64Slice(nums))) + pendNames = make([]string, len(nums)) + for i, num := range nums { + pendNames[i] = fmt.Sprintf("CURRENT.%d", num) } - // Remove obsolete or incomplete pending CURRENT files. 
- for _, name := range rem { - path := filepath.Join(fs.path, name) - if err := os.Remove(path); err != nil { - fs.log(fmt.Sprintf("remove %s: %v", name, err)) + pendCur, pendErr = tryCurrents(pendNames) + if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) { + return FileDesc{}, pendErr + } + } + + // Try CURRENT and CURRENT.bak. + curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"}) + if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) { + return FileDesc{}, curErr + } + + // pendCur takes precedence, but guards against obsolete pendCur. + if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) { + curCur = pendCur + } + + if curCur != nil { + // Restore CURRENT file to proper state. + if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) { + // Ignore setMeta errors, however don't delete obsolete files if we + // catch error. + if err := fs.setMeta(curCur.fd); err == nil { + // Remove 'pending rename' files. + for _, name := range pendNames { + if err := os.Remove(filepath.Join(fs.path, name)); err != nil { + fs.log(fmt.Sprintf("remove %s: %v", name, err)) + } + } } } + return curCur.fd, nil } - return + + // Nothing found. + if isCorrupted(pendErr) { + return FileDesc{}, pendErr + } + return FileDesc{}, curErr } func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) { diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go index 7e29915379e1..d75f66a9efc6 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go @@ -67,13 +67,25 @@ func isErrInvalid(err error) bool { if err == os.ErrInvalid { return true } + // Go < 1.8 if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL { return true } + // Go >= 1.8 returns *os.PathError instead + if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL { + return true + } return false } func syncDir(name string) error { + // As per fsync manpage, Linux seems to expect fsync on directory, however + // some system don't support this, so we will ignore syscall.EINVAL. + // + // From fsync(2): + // Calling fsync() does not necessarily ensure that the entry in the + // directory containing the file has also reached disk. For that an + // explicit fsync() on a file descriptor for the directory is also needed. 
f, err := os.Open(name) if err != nil { return err diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go index c16bce6b6652..4e4a724258d6 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go @@ -55,6 +55,14 @@ type ErrCorrupted struct { Err error } +func isCorrupted(err error) bool { + switch err.(type) { + case *ErrCorrupted: + return true + } + return false +} + func (e *ErrCorrupted) Error() string { if !e.Fd.Zero() { return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md new file mode 100644 index 000000000000..28e2c242558b --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md @@ -0,0 +1,186 @@ +Changes by Version +================== + +2.15.0 (unreleased) +------------------- + +- nothing yet + + +2.14.0 (2018-04-30) +------------------- + +- Support throttling for debug traces (#274) +- Remove dependency on Apache Thrift (#303) +- Remove dependency on tchannel (#295) (#294) +- Test with Go 1.9 (#298) + + +2.13.0 (2018-04-15) +------------------- + +- Use value receiver for config.NewTracer() (#283) +- Lock span during jaeger thrift conversion (#273) +- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) +- Added support for client configuration via env vars (#275) +- Allow overriding sampler in the Config (#270) + + +2.12.0 (2018-03-14) +------------------- + +- Use lock when retrieving span.Context() (#268) +- Add Configuration support for custom Injector and Extractor (#263) + + +2.11.2 (2018-01-12) +------------------- + +- Add Gopkg.toml to allow using the lib with `dep` + + +2.11.1 (2018-01-03) +------------------- + +- Do not enqueue spans after Reporter is closed (#235, #245) +- Change default flush interval to 1sec (#243) + + +2.11.0 (2017-11-27) +------------------- + +- Normalize metric names and tags to be compatible with Prometheus (#222) + + +2.10.0 (2017-11-14) +------------------- + +- Support custom tracing headers (#176) +- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182) +- Do not coerce baggage keys to lower case (#196) +- Log span name when span cannot be reported (#198) +- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219) + + +2.9.0 (2017-07-29) +------------------ + +- Pin thrift <= 0.10 (#179) +- Introduce a parallel interface ContribObserver (#159) + + +2.8.0 (2017-07-05) +------------------ + +- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag +- Add options to set tracer tags + + +2.7.0 (2017-06-21) +------------------ + +- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140) +- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147) +- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153) +- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158) +- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161) + + +2.6.0 (2017-03-28) 
+------------------ + +- Add config option to initialize RPC Metrics feature + + +2.5.0 (2017-03-23) +------------------ + +- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123) +- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124) +- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125) + + +2.4.0 (2017-03-21) +------------------ + +- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121) +- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121) + + +2.3.0 (2017-03-20) +------------------ + +- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117) +- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118) + + +2.2.1 (2017-03-14) +------------------ + +- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111) + + +2.2.0 (2017-03-10) +------------------ + +- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94) +- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103) + + +2.1.2 (2017-02-27) +------------------- + +- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99) +- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100) +- Add tracer initialization godoc examples + + +2.1.1 (2017-02-21) +------------------- + +- Fix inefficient usage of zap.Logger + + +2.1.0 (2017-02-17) +------------------- + +- Add adapter for zap.Logger (https://github.com/uber-go/zap) +- Move logging API to ./log/ package + + +2.0.0 (2017-02-08) +------------------- + +- Support Adaptive Sampling +- Support 128bit Trace IDs +- Change trace/span IDs from uint64 to strong types TraceID and SpanID +- Add Zipkin HTTP B3 Propagation format support #72 +- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics +- Change API for tracer, reporter, sampler initialization + + +1.6.0 (2016-10-14) +------------------- + +- Add Zipkin HTTP transport +- Support external baggage via jaeger-baggage header +- Unpin Thrift version, keep to master + + +1.5.1 (2016-09-27) +------------------- + +- Relax dependency on opentracing to ^1 + + +1.5.0 (2016-09-27) +------------------- + +- Upgrade to opentracing-go 1.0 +- Support KV logging for Spans + + +1.4.0 (2016-09-14) +------------------- + +- Support debug traces via HTTP header "jaeger-debug-id" diff --git a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md new file mode 100644 index 000000000000..7cf014a51edb --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md @@ -0,0 +1,170 @@ +# How to Contribute to Jaeger + +We'd love your help! + +Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub +pull requests. This document outlines some of the conventions on development +workflow, commit message formatting, contact points and other resources to make +it easier to get your contribution accepted. 
+ +We gratefully welcome improvements to documentation as well as to code. + +# Certificate of Origin + +By contributing to this project you agree to the [Developer Certificate of +Origin](https://developercertificate.org/) (DCO). This document was created +by the Linux Kernel community and is a simple statement that you, as a +contributor, have the legal right to make the contribution. See the [DCO](DCO) +file for details. + +## Getting Started + +This library uses [glide](https://github.com/Masterminds/glide) to manage dependencies. + +To get started, make sure you clone the Git repository into the correct location +`github.com/uber/jaeger-client-go` relative to `$GOPATH`: + +``` +mkdir -p $GOPATH/src/github.com/uber +cd $GOPATH/src/github.com/uber +git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go +cd jaeger-client-go +``` + +Then install dependencies and run the tests: + +``` +git submodule update --init --recursive +glide install +make test +``` + +## Imports grouping + +This projects follows the following pattern for grouping imports in Go files: + * imports from standard library + * imports from other projects + * imports from `jaeger-client-go` project + +For example: + +```go +import ( + "fmt" + + "github.com/uber/jaeger-lib/metrics" + "go.uber.org/zap" + + "github.com/uber/jaeger-client-go/config" +) +``` + +## Making A Change + +*Before making any significant changes, please [open an +issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed +changes ahead of time will make the contribution process smooth for everyone. + +Once we've discussed your changes and you've got your code ready, make sure +that tests are passing (`make test` or `make cover`) and open your PR. Your +pull request is most likely to be accepted if it: + +* Includes tests for new functionality. +* Follows the guidelines in [Effective + Go](https://golang.org/doc/effective_go.html) and the [Go team's common code + review comments](https://github.com/golang/go/wiki/CodeReviewComments). +* Has a [good commit message](https://chris.beams.io/posts/git-commit/): + * Separate subject from body with a blank line + * Limit the subject line to 50 characters + * Capitalize the subject line + * Do not end the subject line with a period + * Use the imperative mood in the subject line + * Wrap the body at 72 characters + * Use the body to explain _what_ and _why_ instead of _how_ +* Each commit must be signed by the author ([see below](#sign-your-work)). + +## License + +By contributing your code, you agree to license your contribution under the terms +of the [Apache License](LICENSE). + +If you are adding a new file it should have a header like below. The easiest +way to add such header is to run `make fmt`. + +``` +// Copyright (c) 2017 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+``` + +## Sign your work + +The sign-off is a simple line at the end of the explanation for the +patch, which certifies that you wrote it or otherwise have the right to +pass it on as an open-source patch. The rules are pretty simple: if you +can certify the below (from +[developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. + +If you want this to be automatic you can set up some aliases: + +``` +git config --add alias.amend "commit -s --amend" +git config --add alias.c "commit -s" +``` diff --git a/vendor/github.com/uber/jaeger-client-go/DCO b/vendor/github.com/uber/jaeger-client-go/DCO new file mode 100644 index 000000000000..068953d4bd98 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/DCO @@ -0,0 +1,37 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. + diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock new file mode 100644 index 000000000000..ec054c6ed62a --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock @@ -0,0 +1,164 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + branch = "master" + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + branch = "master" + name = "github.com/codahale/hdrhistogram" + packages = ["."] + revision = "3a0bb77429bd3a61596f5e8a3172445844342120" + +[[projects]] + branch = "master" + name = "github.com/crossdock/crossdock-go" + packages = [ + ".", + "assert", + "require" + ] + revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "925541529c1fa6821df4e44ce2723319eb2be768" + version = "v1.0.0" + +[[projects]] + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" + version = "v1.0.0" + +[[projects]] + name = "github.com/opentracing/opentracing-go" + packages = [ + ".", + "ext", + "log" + ] + revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" + version = "v1.0.2" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/prometheus/client_golang" + packages = ["prometheus"] + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + +[[projects]] + branch = "master" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model" + ] + revision = "d811d2e9bf898806ecfb6ef6296774b13ffc314c" + +[[projects]] + branch = "master" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs" + ] + revision = "8b1c2da0d56deffdbb9e48d4414b4e674bd8083e" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + "suite" + ] + revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" + version = "v1.2.1" + +[[projects]] + name = "github.com/uber-go/atomic" + packages = ["."] + revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8" + version = "v1.3.1" + +[[projects]] + name = "github.com/uber/jaeger-lib" + packages = [ + "metrics", + "metrics/prometheus", + "metrics/testutils" + ] + revision = 
"4267858c0679cd4e47cefed8d7f70fd386cfb567" + version = "v1.4.0" + +[[projects]] + name = "go.uber.org/atomic" + packages = ["."] + revision = "54f72d32435d760d5604f17a82e2435b28dc4ba5" + version = "v1.3.0" + +[[projects]] + name = "go.uber.org/multierr" + packages = ["."] + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "zapcore" + ] + revision = "eeedf312bc6c57391d84767a4cd413f02a917974" + version = "v1.8.0" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp" + ] + revision = "5f9ae10d9af5b1c89ae6904293b14b064d4ada23" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "f9dcfaf37a785c5dac1e20c29605eda29a83ba9c6f8842e92960dc94c8c4ff80" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml new file mode 100644 index 000000000000..baf7a6bdf7ae --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml @@ -0,0 +1,27 @@ +[[constraint]] + name = "github.com/crossdock/crossdock-go" + branch = "master" + +[[constraint]] + name = "github.com/opentracing/opentracing-go" + version = "^1" + +[[constraint]] + name = "github.com/prometheus/client_golang" + version = "0.8.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "^1.1.3" + +[[constraint]] + name = "github.com/uber-go/atomic" + version = "^1" + +[[constraint]] + name = "github.com/uber/jaeger-lib" + version = "^1.3" + +[[constraint]] + name = "go.uber.org/zap" + version = "^1" diff --git a/vendor/github.com/uber/jaeger-client-go/LICENSE b/vendor/github.com/uber/jaeger-client-go/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile new file mode 100644 index 000000000000..601cc65148e2 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/Makefile @@ -0,0 +1,117 @@ +PROJECT_ROOT=github.com/uber/jaeger-client-go +PACKAGES := $(shell glide novendor | grep -v -e ./thrift-gen/... -e ./thrift/...) +# all .go files that don't exist in hidden directories +ALL_SRC := $(shell find . 
-name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \ + -e ".*/\..*" \ + -e ".*/_.*" \ + -e ".*/mocks.*") + +-include crossdock/rules.mk + +export GO15VENDOREXPERIMENT=1 + +RACE=-race +GOTEST=go test -v $(RACE) +GOLINT=golint +GOVET=go vet +GOFMT=gofmt +FMT_LOG=fmt.log +LINT_LOG=lint.log + +THRIFT_VER=0.9.3 +THRIFT_IMG=thrift:$(THRIFT_VER) +THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift +THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift" +THRIFT_GEN_DIR=thrift-gen + +PASS=$(shell printf "\033[32mPASS\033[0m") +FAIL=$(shell printf "\033[31mFAIL\033[0m") +COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/'' + +.DEFAULT_GOAL := test-and-lint + +.PHONY: test-and-lint +test-and-lint: test fmt lint + +.PHONY: test +test: + bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)" + +.PHONY: fmt +fmt: + $(GOFMT) -e -s -l -w $(ALL_SRC) + ./scripts/updateLicenses.sh + +.PHONY: lint +lint: + $(GOVET) $(PACKAGES) + @cat /dev/null > $(LINT_LOG) + @$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;) + @[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false) + @$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG) + ./scripts/updateLicenses.sh >> $(FMT_LOG) + @[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false) + + +.PHONY: install +install: + glide --version || go get github.com/Masterminds/glide +ifeq ($(USE_DEP),true) + dep ensure +else + glide install +endif + + +.PHONY: cover +cover: + ./scripts/cover.sh $(shell go list $(PACKAGES)) + go tool cover -html=cover.out -o cover.html + + +# This is not part of the regular test target because we don't want to slow it +# down. +.PHONY: test-examples +test-examples: + make -C examples + +# TODO at the moment we're not generating tchan_*.go files +thrift: idl-submodule thrift-image + $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift + $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift + $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift + $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift + $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift + $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift + sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go + sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go + sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \ + $(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go + rm -rf thrift-gen/*/*-remote + rm -rf crossdock/thrift/*/*-remote + rm -rf thrift-gen/jaeger/collector.go + +idl-submodule: + git submodule init + git submodule update + +thrift-image: + $(THRIFT) -version + +.PHONY: install-dep-ci +install-dep-ci: + - curl -L -s https://github.com/golang/dep/releases/download/v0.3.2/dep-linux-amd64 -o $$GOPATH/bin/dep + - chmod +x $$GOPATH/bin/dep + +.PHONY: install-ci +install-ci: install-dep-ci install + go get github.com/wadey/gocovmerge + go get github.com/mattn/goveralls + go get 
golang.org/x/tools/cmd/cover + go get github.com/golang/lint/golint + +.PHONY: test-ci +test-ci: + @./scripts/cover.sh $(shell go list $(PACKAGES)) + make lint + diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md new file mode 100644 index 000000000000..16b04454e168 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/README.md @@ -0,0 +1,260 @@ +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url] + +# Jaeger Bindings for Go OpenTracing API + +Instrumentation library that implements an +[OpenTracing](http://opentracing.io) Tracer for Jaeger (https://jaegertracing.io). + +**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`, it will not compile. We might revisit this in the next major release. + * :white_check_mark: `import "github.com/uber/jaeger-client-go"` + * :x: `import "github.com/jaegertracing/jaeger-client-go"` + +## How to Contribute + +Please see [CONTRIBUTING.md](CONTRIBUTING.md). + +## Installation + +We recommended using a dependency manager like [glide](https://github.com/Masterminds/glide) +and [semantic versioning](http://semver.org/) when including this library into an application. +For example, Jaeger backend imports this library like this: + +```yaml +- package: github.com/uber/jaeger-client-go + version: ^2.7.0 +``` + +If you instead want to use the latest version in `master`, you can pull it via `go get`. +Note that during `go get` you may see build errors due to incompatible dependencies, which is why +we recommend using semantic versions for dependencies. The error may be fixed by running +`make install` (it will install `glide` if you don't have it): + +```shell +go get -u github.com/uber/jaeger-client-go/ +cd $GOPATH/src/github.com/uber/jaeger-client-go/ +git submodule update --init --recursive +make install +``` + +## Initialization + +See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples) +and [config/example_test.go](./config/example_test.go). + +### Environment variables + +The tracer can be initialized with values coming from environment variables. None of the env vars are required +and all of them can be overriden via direct setting of the property on the configuration object. + +Property| Description +--- | --- +JAEGER_SERVICE_NAME | The service name +JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP +JAEGER_AGENT_PORT | The port for communicating with agent via UDP +JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans +JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size +JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval (ms) +JAEGER_SAMPLER_TYPE | The sampler type +JAEGER_SAMPLER_PARAM | The sampler parameter (number) +JAEGER_SAMPLER_MANAGER_HOST_PORT | The host name and port when using the remote controlled sampler +JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of +JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy +JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. 
The value can also refer to an environment variable using the format `${envVarName:default}`, where the `:default` is optional, and identifies a value to be used if the environment variable cannot be found +JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used. +JAEGER_RPC_METRICS | Whether to store RPC metrics + +### Closing the tracer via `io.Closer` + +The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance. +It is recommended to structure your `main()` so that it calls the `Close()` function on the closer +before exiting, e.g. + +```go +tracer, closer, err := cfg.NewTracer(...) +defer closer.Close() +``` + +This is especially useful for command-line tools that enable tracing, as well as +for the long-running apps that support graceful shutdown. For example, if your deployment +system sends SIGTERM instead of killing the process and you trap that signal to do a graceful +exit, then having `defer closer.Closer()` ensures that all buffered spans are flushed. + +### Metrics & Monitoring + +The tracer emits a number of different metrics, defined in +[metrics.go](metrics.go). The monitoring backend is expected to support +tag-based metric names, e.g. instead of `statsd`-style string names +like `counters.my-service.jaeger.spans.started.sampled`, the metrics +are defined by a short name and a collection of key/value tags, for +example: `name:jaeger.traces, state:started, sampled:y`. See [metrics.go](./metrics.go) +file for the full list and descriptions of emitted metrics. + +The monitoring backend is represented by the `metrics.Factory` interface from package +[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation +of that interface can be passed as an option to either the Configuration object or the Tracer +constructor, for example: + +```go +import ( + "github.com/uber/jaeger-client-go/config" + "github.com/uber/jaeger-lib/metrics/prometheus" +) + + metricsFactory := prometheus.New() + tracer, closer, err := config.Configuration{ + ServiceName: "your-service-name", + }.NewTracer( + config.Metrics(metricsFactory), + ) +``` + +By default, a no-op `metrics.NullFactory` is used. + +### Logging + +The tracer can be configured with an optional logger, which will be +used to log communication errors, or log spans if a logging reporter +option is specified in the configuration. The logging API is abstracted +by the [Logger](logger.go) interface. A logger instance implementing +this interface can be set on the `Config` object before calling the +`New` method. + +Besides the [zap](https://github.com/uber-go/zap) implementation +bundled with this package there is also a [go-kit](https://github.com/go-kit/kit) +one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository. + +## Instrumentation for Tracing + +Since this tracer is fully compliant with OpenTracing API 1.0, +all code instrumentation should only use the API itself, as described +in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation. + +## Features + +### Reporters + +A "reporter" is a component that receives the finished spans and reports +them to somewhere. Under normal circumstances, the Tracer +should use the default `RemoteReporter`, which sends the spans out of +process via configurable "transport". 
For testing purposes, one can +use an `InMemoryReporter` that accumulates spans in a buffer and +allows to retrieve them for later verification. Also available are +`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter` +which logs all finished spans using their `String()` method, and a +`CompositeReporter` that can be used to combine more than one reporter +into one, e.g. to attach a logging reporter to the main remote reporter. + +### Span Reporting Transports + +The remote reporter uses "transports" to actually send the spans out +of process. Currently the supported transports include: + * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP, + * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP. + +### Sampling + +The tracer does not record all spans, but only those that have the +sampling bit set in the `flags`. When a new trace is started and a new +unique ID is generated, a sampling decision is made whether this trace +should be sampled. The sampling decision is propagated to all downstream +calls via the `flags` field of the trace context. The following samplers +are available: + 1. `RemotelyControlledSampler` uses one of the other simpler samplers + and periodically updates it by polling an external server. This + allows dynamic control of the sampling strategies. + 1. `ConstSampler` always makes the same sampling decision for all + trace IDs. it can be configured to either sample all traces, or + to sample none. + 1. `ProbabilisticSampler` uses a fixed sampling rate as a probability + for a given trace to be sampled. The actual decision is made by + comparing the trace ID with a random number multiplied by the + sampling rate. + 1. `RateLimitingSampler` can be used to allow only a certain fixed + number of traces to be sampled per second. + +### Baggage Injection + +The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added +to the span context and propagated throughout the trace. An external process can inject baggage +by setting the special HTTP Header `jaeger-baggage` on a request: + +```sh +curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com +``` + +Baggage can also be programatically set inside your service: + +```go +if span := opentracing.SpanFromContext(ctx); span != nil { + span.SetBaggageItem("key", "value") +} +``` + +Another service downstream of that can retrieve the baggage in a similar way: + +```go +if span := opentracing.SpanFromContext(ctx); span != nil { + val := span.BaggageItem("key") + println(val) +} +``` + +### Debug Traces (Forced Sampling) + +#### Programmatically + +The OpenTracing API defines a `sampling.priority` standard tag that +can be used to affect the sampling of a span and its children: + +```go +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" +) + +span := opentracing.SpanFromContext(ctx) +ext.SamplingPriority.Set(span, 1) +``` + +#### Via HTTP Headers + +Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`, +which can be set in the incoming request, e.g. 
+ +```sh +curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com +``` + +When Jaeger sees this header in the request that otherwise has no +tracing context, it ensures that the new trace started for this +request will be sampled in the "debug" mode (meaning it should survive +all downsampling that might happen in the collection pipeline), and the +root span will have a tag as if this statement was executed: + +```go +span.SetTag("jaeger-debug-id", "some-correlation-id") +``` + +This allows using Jaeger UI to find the trace by this tag. + +### Zipkin HTTP B3 compatible header propagation + +Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used +by a lot of Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin). + +However it is not the default propagation format, see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) how to set it up. + +## License + +[Apache 2.0 License](LICENSE). + + +[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg +[doc]: https://godoc.org/github.com/uber/jaeger-client-go +[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master +[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go +[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go +[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg +[ot-url]: http://opentracing.io +[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md new file mode 100644 index 000000000000..115e49ab8ad3 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/RELEASE.md @@ -0,0 +1,11 @@ +# Release Process + +1. Create a PR "Preparing for release X.Y.Z" against master branch + * Alter CHANGELOG.md from ` (unreleased)` to ` (YYYY-MM-DD)` + * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z` +2. Create a release "Release X.Y.Z" on Github + * Create Tag `vX.Y.Z` + * Copy CHANGELOG.md into the release notes +3. Create a PR "Back to development" against master branch + * Add ` (unreleased)` to CHANGELOG.md + * Update `JaegerClientVersion` in constants.go to `Go-dev` diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go new file mode 100644 index 000000000000..1037ca0e861d --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go @@ -0,0 +1,77 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger + +import ( + "github.com/opentracing/opentracing-go/log" + + "github.com/uber/jaeger-client-go/internal/baggage" +) + +// baggageSetter is an actor that can set a baggage value on a Span given certain +// restrictions (eg. maxValueLength). +type baggageSetter struct { + restrictionManager baggage.RestrictionManager + metrics *Metrics +} + +func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter { + return &baggageSetter{ + restrictionManager: restrictionManager, + metrics: metrics, + } +} + +// (NB) span should hold the lock before making this call +func (s *baggageSetter) setBaggage(span *Span, key, value string) { + var truncated bool + var prevItem string + restriction := s.restrictionManager.GetRestriction(span.serviceName(), key) + if !restriction.KeyAllowed() { + s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed()) + s.metrics.BaggageUpdateFailure.Inc(1) + return + } + if len(value) > restriction.MaxValueLength() { + truncated = true + value = value[:restriction.MaxValueLength()] + s.metrics.BaggageTruncate.Inc(1) + } + prevItem = span.context.baggage[key] + s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed()) + span.context = span.context.WithBaggageItem(key, value) + s.metrics.BaggageUpdateSuccess.Inc(1) +} + +func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) { + if !span.context.IsSampled() { + return + } + fields := []log.Field{ + log.String("event", "baggage"), + log.String("key", key), + log.String("value", value), + } + if prevItem != "" { + fields = append(fields, log.String("override", "true")) + } + if truncated { + fields = append(fields, log.String("truncated", "true")) + } + if !valid { + fields = append(fields, log.String("invalid", "true")) + } + span.logFieldsNoLocking(fields...) +} diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go new file mode 100644 index 000000000000..1eb29677854e --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/config/config.go @@ -0,0 +1,373 @@ +// Copyright (c) 2017-2018 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/opentracing/opentracing-go" + + "github.com/uber/jaeger-client-go" + "github.com/uber/jaeger-client-go/internal/baggage/remote" + throttler "github.com/uber/jaeger-client-go/internal/throttler/remote" + "github.com/uber/jaeger-client-go/rpcmetrics" +) + +const defaultSamplingProbability = 0.001 + +// Configuration configures and creates Jaeger Tracer +type Configuration struct { + // ServiceName specifies the service name to use on the tracer. 
+ // Can be provided via environment variable named JAEGER_SERVICE_NAME + ServiceName string `yaml:"serviceName"` + + // Disabled can be provided via environment variable named JAEGER_DISABLED + Disabled bool `yaml:"disabled"` + + // RPCMetrics can be provided via environment variable named JAEGER_RPC_METRICS + RPCMetrics bool `yaml:"rpc_metrics"` + + // Tags can be provided via environment variable named JAEGER_TAGS + Tags []opentracing.Tag `yaml:"tags"` + + Sampler *SamplerConfig `yaml:"sampler"` + Reporter *ReporterConfig `yaml:"reporter"` + Headers *jaeger.HeadersConfig `yaml:"headers"` + BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"` + Throttler *ThrottlerConfig `yaml:"throttler"` +} + +// SamplerConfig allows initializing a non-default sampler. All fields are optional. +type SamplerConfig struct { + // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote + // Can be set by exporting an environment variable named JAEGER_SAMPLER_TYPE + Type string `yaml:"type"` + + // Param is a value passed to the sampler. + // Valid values for Param field are: + // - for "const" sampler, 0 or 1 for always false/true respectively + // - for "probabilistic" sampler, a probability between 0 and 1 + // - for "rateLimiting" sampler, the number of spans per second + // - for "remote" sampler, param is the same as for "probabilistic" + // and indicates the initial sampling rate before the actual one + // is received from the mothership. + // Can be set by exporting an environment variable named JAEGER_SAMPLER_PARAM + Param float64 `yaml:"param"` + + // SamplingServerURL is the address of jaeger-agent's HTTP sampling server + // Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT + SamplingServerURL string `yaml:"samplingServerURL"` + + // MaxOperations is the maximum number of operations that the sampler + // will keep track of. If an operation is not tracked, a default probabilistic + // sampler will be used rather than the per operation specific sampler. + // Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS + MaxOperations int `yaml:"maxOperations"` + + // SamplingRefreshInterval controls how often the remotely controlled sampler will poll + // jaeger-agent for the appropriate sampling strategy. + // Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL + SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"` +} + +// ReporterConfig configures the reporter. All fields are optional. +type ReporterConfig struct { + // QueueSize controls how many spans the reporter can keep in memory before it starts dropping + // new spans. The queue is continuously drained by a background go-routine, as fast as spans + // can be sent out of process. + // Can be set by exporting an environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE + QueueSize int `yaml:"queueSize"` + + // BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. + // It is generally not useful, as it only matters for very low traffic services. + // Can be set by exporting an environment variable named JAEGER_REPORTER_FLUSH_INTERVAL + BufferFlushInterval time.Duration + + // LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter + // and logs all submitted spans. Main Configuration.Logger must be initialized in the code + // for this option to have any effect. 
+ // Can be set by exporting an environment variable named JAEGER_REPORTER_LOG_SPANS
+ LogSpans bool `yaml:"logSpans"`
+
+ // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address
+ // Can be set by exporting an environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
+ LocalAgentHostPort string `yaml:"localAgentHostPort"`
+}
+
+// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
+// certain baggage keys. All fields are optional.
+type BaggageRestrictionsConfig struct {
+ // DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction
+ // manager. If true, the manager will not allow any baggage to be written until baggage restrictions have
+ // been retrieved from jaeger-agent. If false, the manager will allow any baggage to be written until baggage
+ // restrictions have been retrieved from jaeger-agent.
+ DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"`
+
+ // HostPort is the hostPort of jaeger-agent's baggage restrictions server
+ HostPort string `yaml:"hostPort"`
+
+ // RefreshInterval controls how often the baggage restriction manager will poll
+ // jaeger-agent for the most recent baggage restrictions.
+ RefreshInterval time.Duration `yaml:"refreshInterval"`
+}
+
+// ThrottlerConfig configures the throttler which can be used to throttle the
+// rate at which the client may send debug requests.
+type ThrottlerConfig struct {
+ // HostPort of jaeger-agent's credit server.
+ HostPort string `yaml:"hostPort"`
+
+ // RefreshInterval controls how often the throttler will poll jaeger-agent
+ // for more throttling credits.
+ RefreshInterval time.Duration `yaml:"refreshInterval"`
+
+ // SynchronousInitialization determines whether or not the throttler should
+ // synchronously fetch credits from the agent when an operation is seen for
+ // the first time. This should be set to true if the client will be used by
+ // a short-lived service that needs to ensure that credits are fetched
+ // upfront such that sampling or throttling occurs.
+ SynchronousInitialization bool `yaml:"synchronousInitialization"`
+}
+
+type nullCloser struct{}
+
+func (*nullCloser) Close() error { return nil }
+
+// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers
+// before shutdown.
+//
+// Deprecated: use NewTracer() function
+func (c Configuration) New(
+ serviceName string,
+ options ...Option,
+) (opentracing.Tracer, io.Closer, error) {
+ if serviceName != "" {
+ c.ServiceName = serviceName
+ }
+
+ return c.NewTracer(options...)
+}
+
+// NewTracer returns a new tracer based on the current configuration, using the given options,
+// and a closer func that can be used to flush buffers before shutdown.
+func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
+ if c.ServiceName == "" {
+ return nil, nil, errors.New("no service name provided")
+ }
+
+ if c.Disabled {
+ return &opentracing.NoopTracer{}, &nullCloser{}, nil
+ }
+ opts := applyOptions(options...)
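+ // Caller-side sketch (illustrative only, not part of the upstream file;
+ // the service name is hypothetical):
+ //
+ //	cfg := config.Configuration{
+ //		ServiceName: "my-service",
+ //		Sampler:     &config.SamplerConfig{Type: jaeger.SamplerTypeConst, Param: 1},
+ //		Reporter:    &config.ReporterConfig{LogSpans: true},
+ //	}
+ //	tracer, closer, err := cfg.NewTracer()
+ //
+ // Below, a nil Sampler falls back to a remote sampler seeded with
+ // defaultSamplingProbability, and a nil Reporter falls back to a UDP
+ // reporter aimed at the local jaeger-agent.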
+ tracerMetrics := jaeger.NewMetrics(opts.metrics, nil) + if c.RPCMetrics { + Observer( + rpcmetrics.NewObserver( + opts.metrics.Namespace("jaeger-rpc", map[string]string{"component": "jaeger"}), + rpcmetrics.DefaultNameNormalizer, + ), + )(&opts) // adds to c.observers + } + if c.Sampler == nil { + c.Sampler = &SamplerConfig{ + Type: jaeger.SamplerTypeRemote, + Param: defaultSamplingProbability, + } + } + if c.Reporter == nil { + c.Reporter = &ReporterConfig{} + } + + sampler := opts.sampler + if sampler == nil { + s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics) + if err != nil { + return nil, nil, err + } + sampler = s + } + + reporter := opts.reporter + if reporter == nil { + r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger) + if err != nil { + return nil, nil, err + } + reporter = r + } + + tracerOptions := []jaeger.TracerOption{ + jaeger.TracerOptions.Metrics(tracerMetrics), + jaeger.TracerOptions.Logger(opts.logger), + jaeger.TracerOptions.CustomHeaderKeys(c.Headers), + jaeger.TracerOptions.Gen128Bit(opts.gen128Bit), + jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan), + jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength), + } + + for _, tag := range opts.tags { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value)) + } + + for _, tag := range c.Tags { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value)) + } + + for _, obs := range opts.observers { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs)) + } + + for _, cobs := range opts.contribObservers { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs)) + } + + for format, injector := range opts.injectors { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector)) + } + + for format, extractor := range opts.extractors { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor)) + } + + if c.BaggageRestrictions != nil { + mgr := remote.NewRestrictionManager( + c.ServiceName, + remote.Options.Metrics(tracerMetrics), + remote.Options.Logger(opts.logger), + remote.Options.HostPort(c.BaggageRestrictions.HostPort), + remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval), + remote.Options.DenyBaggageOnInitializationFailure( + c.BaggageRestrictions.DenyBaggageOnInitializationFailure, + ), + ) + tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr)) + } + + if c.Throttler != nil { + debugThrottler := throttler.NewThrottler( + c.ServiceName, + throttler.Options.Metrics(tracerMetrics), + throttler.Options.Logger(opts.logger), + throttler.Options.HostPort(c.Throttler.HostPort), + throttler.Options.RefreshInterval(c.Throttler.RefreshInterval), + throttler.Options.SynchronousInitialization( + c.Throttler.SynchronousInitialization, + ), + ) + + tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler)) + } + + tracer, closer := jaeger.NewTracer( + c.ServiceName, + sampler, + reporter, + tracerOptions..., + ) + + return tracer, closer, nil +} + +// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer. +// It returns a closer func that can be used to flush buffers before shutdown. 
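+//
+// An illustrative sketch (not part of the upstream documentation), combined
+// with FromEnv from config_env.go; the service name is hypothetical:
+//
+//	cfg, err := config.FromEnv() // honours the JAEGER_* environment variables
+//	if err != nil {
+//		// handle error
+//	}
+//	closer, err := cfg.InitGlobalTracer("my-service")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer closer.Close()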
+func (c Configuration) InitGlobalTracer( + serviceName string, + options ...Option, +) (io.Closer, error) { + if c.Disabled { + return &nullCloser{}, nil + } + tracer, closer, err := c.New(serviceName, options...) + if err != nil { + return nil, err + } + opentracing.SetGlobalTracer(tracer) + return closer, nil +} + +// NewSampler creates a new sampler based on the configuration +func (sc *SamplerConfig) NewSampler( + serviceName string, + metrics *jaeger.Metrics, +) (jaeger.Sampler, error) { + samplerType := strings.ToLower(sc.Type) + if samplerType == jaeger.SamplerTypeConst { + return jaeger.NewConstSampler(sc.Param != 0), nil + } + if samplerType == jaeger.SamplerTypeProbabilistic { + if sc.Param >= 0 && sc.Param <= 1.0 { + return jaeger.NewProbabilisticSampler(sc.Param) + } + return nil, fmt.Errorf( + "Invalid Param for probabilistic sampler: %v. Expecting value between 0 and 1", + sc.Param, + ) + } + if samplerType == jaeger.SamplerTypeRateLimiting { + return jaeger.NewRateLimitingSampler(sc.Param), nil + } + if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" { + sc2 := *sc + sc2.Type = jaeger.SamplerTypeProbabilistic + initSampler, err := sc2.NewSampler(serviceName, nil) + if err != nil { + return nil, err + } + options := []jaeger.SamplerOption{ + jaeger.SamplerOptions.Metrics(metrics), + jaeger.SamplerOptions.InitialSampler(initSampler), + jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL), + } + if sc.MaxOperations != 0 { + options = append(options, jaeger.SamplerOptions.MaxOperations(sc.MaxOperations)) + } + if sc.SamplingRefreshInterval != 0 { + options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval)) + } + return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil + } + return nil, fmt.Errorf("Unknown sampler type %v", sc.Type) +} + +// NewReporter instantiates a new reporter that submits spans to tcollector +func (rc *ReporterConfig) NewReporter( + serviceName string, + metrics *jaeger.Metrics, + logger jaeger.Logger, +) (jaeger.Reporter, error) { + sender, err := rc.newTransport() + if err != nil { + return nil, err + } + reporter := jaeger.NewRemoteReporter( + sender, + jaeger.ReporterOptions.QueueSize(rc.QueueSize), + jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval), + jaeger.ReporterOptions.Logger(logger), + jaeger.ReporterOptions.Metrics(metrics)) + if rc.LogSpans && logger != nil { + logger.Infof("Initializing logging reporter\n") + reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter) + } + return reporter, err +} + +func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) { + return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0) +} diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go new file mode 100644 index 000000000000..96f170c539d9 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go @@ -0,0 +1,205 @@ +// Copyright (c) 2018 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "fmt" + "os" + "strconv" + "strings" + "time" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + + "github.com/uber/jaeger-client-go" +) + +const ( + // environment variable names + envServiceName = "JAEGER_SERVICE_NAME" + envDisabled = "JAEGER_DISABLED" + envRPCMetrics = "JAEGER_RPC_METRICS" + envTags = "JAEGER_TAGS" + envSamplerType = "JAEGER_SAMPLER_TYPE" + envSamplerParam = "JAEGER_SAMPLER_PARAM" + envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" + envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS" + envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL" + envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE" + envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL" + envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS" + envAgentHost = "JAEGER_AGENT_HOST" + envAgentPort = "JAEGER_AGENT_PORT" +) + +// FromEnv uses environment variables to set the tracer's Configuration +func FromEnv() (*Configuration, error) { + c := &Configuration{} + + if e := os.Getenv(envServiceName); e != "" { + c.ServiceName = e + } + + if e := os.Getenv(envRPCMetrics); e != "" { + if value, err := strconv.ParseBool(e); err == nil { + c.RPCMetrics = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e) + } + } + + if e := os.Getenv(envDisabled); e != "" { + if value, err := strconv.ParseBool(e); err == nil { + c.Disabled = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e) + } + } + + if e := os.Getenv(envTags); e != "" { + c.Tags = parseTags(e) + } + + if s, err := samplerConfigFromEnv(); err == nil { + c.Sampler = s + } else { + return nil, errors.Wrap(err, "cannot obtain sampler config from env") + } + + if r, err := reporterConfigFromEnv(); err == nil { + c.Reporter = r + } else { + return nil, errors.Wrap(err, "cannot obtain reporter config from env") + } + + return c, nil +} + +// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables +func samplerConfigFromEnv() (*SamplerConfig, error) { + sc := &SamplerConfig{} + + if e := os.Getenv(envSamplerType); e != "" { + sc.Type = e + } + + if e := os.Getenv(envSamplerParam); e != "" { + if value, err := strconv.ParseFloat(e, 64); err == nil { + sc.Param = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e) + } + } + + if e := os.Getenv(envSamplerManagerHostPort); e != "" { + sc.SamplingServerURL = e + } + + if e := os.Getenv(envSamplerMaxOperations); e != "" { + if value, err := strconv.ParseInt(e, 10, 0); err == nil { + sc.MaxOperations = int(value) + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e) + } + } + + if e := os.Getenv(envSamplerRefreshInterval); e != "" { + if value, err := time.ParseDuration(e); err == nil { + sc.SamplingRefreshInterval = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e) + } + } + + return sc, nil +} + +// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables +func reporterConfigFromEnv() (*ReporterConfig, error) { + rc := &ReporterConfig{} + + if e := os.Getenv(envReporterMaxQueueSize); e != "" { + if value, err := strconv.ParseInt(e, 10, 0); err == nil { + rc.QueueSize = int(value) + } else { + return nil, 
errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e) + } + } + + if e := os.Getenv(envReporterFlushInterval); e != "" { + if value, err := time.ParseDuration(e); err == nil { + rc.BufferFlushInterval = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e) + } + } + + if e := os.Getenv(envReporterLogSpans); e != "" { + if value, err := strconv.ParseBool(e); err == nil { + rc.LogSpans = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e) + } + } + + host := jaeger.DefaultUDPSpanServerHost + if e := os.Getenv(envAgentHost); e != "" { + host = e + } + + port := jaeger.DefaultUDPSpanServerPort + if e := os.Getenv(envAgentPort); e != "" { + if value, err := strconv.ParseInt(e, 10, 0); err == nil { + port = int(value) + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e) + } + } + + // the side effect of this is that we are building the default value, even if none of the env vars + // were not explicitly passed + rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port) + + return rc, nil +} + +// parseTags parses the given string into a collection of Tags. +// Spec for this value: +// - comma separated list of key=value +// - value can be specified using the notation ${envVar:defaultValue}, where `envVar` +// is an environment variable and `defaultValue` is the value to use in case the env var is not set +func parseTags(sTags string) []opentracing.Tag { + pairs := strings.Split(sTags, ",") + tags := make([]opentracing.Tag, 0) + for _, p := range pairs { + kv := strings.SplitN(p, "=", 2) + k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]) + + if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") { + ed := strings.SplitN(v[2:len(v)-1], ":", 2) + e, d := ed[0], ed[1] + v = os.Getenv(e) + if v == "" && d != "" { + v = d + } + } + + tag := opentracing.Tag{Key: k, Value: v} + tags = append(tags, tag) + } + + return tags +} diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go new file mode 100644 index 000000000000..d14f1f8a9b2b --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/config/options.go @@ -0,0 +1,148 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + opentracing "github.com/opentracing/opentracing-go" + "github.com/uber/jaeger-lib/metrics" + + "github.com/uber/jaeger-client-go" +) + +// Option is a function that sets some option on the client. +type Option func(c *Options) + +// Options control behavior of the client. 
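+// Options are not built directly by callers; they are accumulated from the
+// exported Option constructors below. An illustrative sketch (not part of
+// the upstream documentation; cfg is a Configuration and the tag values are
+// hypothetical):
+//
+//	tracer, closer, err := cfg.NewTracer(
+//		config.Metrics(metrics.NullFactory),
+//		config.Logger(jaeger.NullLogger),
+//		config.Tag("deployment", "canary"),
+//	)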
+type Options struct {
+ metrics metrics.Factory
+ logger jaeger.Logger
+ reporter jaeger.Reporter
+ sampler jaeger.Sampler
+ contribObservers []jaeger.ContribObserver
+ observers []jaeger.Observer
+ gen128Bit bool
+ zipkinSharedRPCSpan bool
+ maxTagValueLength int
+ tags []opentracing.Tag
+ injectors map[interface{}]jaeger.Injector
+ extractors map[interface{}]jaeger.Extractor
+}
+
+// Metrics creates an Option that initializes Metrics in the tracer,
+// which is used to emit statistics about spans.
+func Metrics(factory metrics.Factory) Option {
+ return func(c *Options) {
+ c.metrics = factory
+ }
+}
+
+// Logger can be provided to log Reporter errors, as well as to log spans
+// if Reporter.LogSpans is set to true.
+func Logger(logger jaeger.Logger) Option {
+ return func(c *Options) {
+ c.logger = logger
+ }
+}
+
+// Reporter can be provided explicitly to override the configuration.
+// Useful for testing, e.g. by passing InMemoryReporter.
+func Reporter(reporter jaeger.Reporter) Option {
+ return func(c *Options) {
+ c.reporter = reporter
+ }
+}
+
+// Sampler can be provided explicitly to override the configuration.
+func Sampler(sampler jaeger.Sampler) Option {
+ return func(c *Options) {
+ c.sampler = sampler
+ }
+}
+
+// Observer can be registered with the Tracer to receive notifications about new Spans.
+func Observer(observer jaeger.Observer) Option {
+ return func(c *Options) {
+ c.observers = append(c.observers, observer)
+ }
+}
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new spans.
+func ContribObserver(observer jaeger.ContribObserver) Option {
+ return func(c *Options) {
+ c.contribObservers = append(c.contribObservers, observer)
+ }
+}
+
+// Gen128Bit specifies whether to generate 128bit trace IDs.
+func Gen128Bit(gen128Bit bool) Option {
+ return func(c *Options) {
+ c.gen128Bit = gen128Bit
+ }
+}
+
+// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
+// and server spans a la zipkin. If false, client and server spans will be assigned
+// different IDs.
+func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option {
+ return func(c *Options) {
+ c.zipkinSharedRPCSpan = zipkinSharedRPCSpan
+ }
+}
+
+// MaxTagValueLength can be provided to override the default max tag value length.
+func MaxTagValueLength(maxTagValueLength int) Option {
+ return func(c *Options) {
+ c.maxTagValueLength = maxTagValueLength
+ }
+}
+
+// Tag creates an option that adds a tracer-level tag.
+func Tag(key string, value interface{}) Option {
+ return func(c *Options) {
+ c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value})
+ }
+}
+
+// Injector registers an Injector with the given format.
+func Injector(format interface{}, injector jaeger.Injector) Option {
+ return func(c *Options) {
+ c.injectors[format] = injector
+ }
+}
+
+// Extractor registers an Extractor with the given format.
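+// An illustrative sketch (not part of the upstream documentation), where
+// myFormat, myInjector and myExtractor are hypothetical caller-supplied values
+// implementing jaeger.Injector and jaeger.Extractor:
+//
+//	cfg.NewTracer(
+//		config.Injector(myFormat, myInjector),
+//		config.Extractor(myFormat, myExtractor),
+//	)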
+func Extractor(format interface{}, extractor jaeger.Extractor) Option { + return func(c *Options) { + c.extractors[format] = extractor + } +} + +func applyOptions(options ...Option) Options { + opts := Options{ + injectors: make(map[interface{}]jaeger.Injector), + extractors: make(map[interface{}]jaeger.Extractor), + } + for _, option := range options { + option(&opts) + } + if opts.metrics == nil { + opts.metrics = metrics.NullFactory + } + if opts.logger == nil { + opts.logger = jaeger.NullLogger + } + return opts +} diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go new file mode 100644 index 000000000000..b5368ff3815b --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/constants.go @@ -0,0 +1,88 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +const ( + // JaegerClientVersion is the version of the client library reported as Span tag. + JaegerClientVersion = "Go-2.15.0-dev" + + // JaegerClientVersionTagKey is the name of the tag used to report client version. + JaegerClientVersionTagKey = "jaeger.version" + + // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which, + // if found in the carrier, forces the trace to be sampled as "debug" trace. + // The value of the header is recorded as the tag on the root span, so that the + // trace can be found in the UI using this value as a correlation ID. + JaegerDebugHeader = "jaeger-debug-id" + + // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage. + // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where + // a root span does not exist. + JaegerBaggageHeader = "jaeger-baggage" + + // TracerHostnameTagKey used to report host name of the process. + TracerHostnameTagKey = "hostname" + + // TracerIPTagKey used to report ip of the process. + TracerIPTagKey = "ip" + + // TracerUUIDTagKey used to report UUID of the client process. + TracerUUIDTagKey = "client-uuid" + + // SamplerTypeTagKey reports which sampler was used on the root span. + SamplerTypeTagKey = "sampler.type" + + // SamplerParamTagKey reports the parameter of the sampler, like sampling probability. + SamplerParamTagKey = "sampler.param" + + // TraceContextHeaderName is the http header name used to propagate tracing context. + // This must be in lower-case to avoid mismatches when decoding incoming headers. + TraceContextHeaderName = "uber-trace-id" + + // TracerStateHeaderName is deprecated. + // Deprecated: use TraceContextHeaderName + TracerStateHeaderName = TraceContextHeaderName + + // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage. + // This must be in lower-case to avoid mismatches when decoding incoming headers. + TraceBaggageHeaderPrefix = "uberctx-" + + // SamplerTypeConst is the type of sampler that always makes the same decision. 
+ SamplerTypeConst = "const"
+
+ // SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
+ SamplerTypeRemote = "remote"
+
+ // SamplerTypeProbabilistic is the type of sampler that samples traces
+ // with a certain fixed probability.
+ SamplerTypeProbabilistic = "probabilistic"
+
+ // SamplerTypeRateLimiting is the type of sampler that samples
+ // only up to a fixed number of traces per second.
+ SamplerTypeRateLimiting = "ratelimiting"
+
+ // SamplerTypeLowerBound is the type of sampler that samples
+ // at least a fixed number of traces per second.
+ SamplerTypeLowerBound = "lowerbound"
+
+ // DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
+ DefaultUDPSpanServerHost = "localhost"
+
+ // DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
+ DefaultUDPSpanServerPort = 6831
+
+ // DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
+ DefaultMaxTagValueLength = 256
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/context.go b/vendor/github.com/uber/jaeger-client-go/context.go
new file mode 100644
index 000000000000..8b06173d9835
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/context.go
@@ -0,0 +1,258 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const (
+ flagSampled = byte(1)
+ flagDebug = byte(2)
+)
+
+var (
+ errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state")
+ errMalformedTracerStateString = errors.New("String does not match tracer state format")
+
+ emptyContext = SpanContext{}
+)
+
+// TraceID represents unique 128bit identifier of a trace
+type TraceID struct {
+ High, Low uint64
+}
+
+// SpanID represents unique 64bit identifier of a span
+type SpanID uint64
+
+// SpanContext represents propagated span identity and state
+type SpanContext struct {
+ // traceID represents globally unique ID of the trace.
+ // Usually generated as a random number.
+ traceID TraceID
+
+ // spanID represents span ID that must be unique within its trace,
+ // but does not have to be globally unique.
+ spanID SpanID
+
+ // parentID refers to the ID of the parent span.
+ // Should be 0 if the current span is a root span.
+ parentID SpanID
+
+ // flags is a bitmap containing such bits as 'sampled' and 'debug'.
+ flags byte
+
+ // Distributed Context baggage. This is a snapshot in time.
+ baggage map[string]string
+
+ // debugID can be set to some correlation ID when the context is being
+ // extracted from a TextMap carrier.
+ // + // See JaegerDebugHeader in constants.go + debugID string +} + +// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext +func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { + for k, v := range c.baggage { + if !handler(k, v) { + break + } + } +} + +// IsSampled returns whether this trace was chosen for permanent storage +// by the sampling mechanism of the tracer. +func (c SpanContext) IsSampled() bool { + return (c.flags & flagSampled) == flagSampled +} + +// IsDebug indicates whether sampling was explicitly requested by the service. +func (c SpanContext) IsDebug() bool { + return (c.flags & flagDebug) == flagDebug +} + +// IsValid indicates whether this context actually represents a valid trace. +func (c SpanContext) IsValid() bool { + return c.traceID.IsValid() && c.spanID != 0 +} + +func (c SpanContext) String() string { + if c.traceID.High == 0 { + return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) + } + return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) +} + +// ContextFromString reconstructs the Context encoded in a string +func ContextFromString(value string) (SpanContext, error) { + var context SpanContext + if value == "" { + return emptyContext, errEmptyTracerStateString + } + parts := strings.Split(value, ":") + if len(parts) != 4 { + return emptyContext, errMalformedTracerStateString + } + var err error + if context.traceID, err = TraceIDFromString(parts[0]); err != nil { + return emptyContext, err + } + if context.spanID, err = SpanIDFromString(parts[1]); err != nil { + return emptyContext, err + } + if context.parentID, err = SpanIDFromString(parts[2]); err != nil { + return emptyContext, err + } + flags, err := strconv.ParseUint(parts[3], 10, 8) + if err != nil { + return emptyContext, err + } + context.flags = byte(flags) + return context, nil +} + +// TraceID returns the trace ID of this span context +func (c SpanContext) TraceID() TraceID { + return c.traceID +} + +// SpanID returns the span ID of this span context +func (c SpanContext) SpanID() SpanID { + return c.spanID +} + +// ParentID returns the parent span ID of this span context +func (c SpanContext) ParentID() SpanID { + return c.parentID +} + +// NewSpanContext creates a new instance of SpanContext +func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext { + flags := byte(0) + if sampled { + flags = flagSampled + } + return SpanContext{ + traceID: traceID, + spanID: spanID, + parentID: parentID, + flags: flags, + baggage: baggage} +} + +// CopyFrom copies data from ctx into this context, including span identity and baggage. +// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing. +func (c *SpanContext) CopyFrom(ctx *SpanContext) { + c.traceID = ctx.traceID + c.spanID = ctx.spanID + c.parentID = ctx.parentID + c.flags = ctx.flags + if l := len(ctx.baggage); l > 0 { + c.baggage = make(map[string]string, l) + for k, v := range ctx.baggage { + c.baggage[k] = v + } + } else { + c.baggage = nil + } +} + +// WithBaggageItem creates a new context with an extra baggage item. 
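+// The receiver is left untouched; the new baggage map is a copy. An
+// illustrative sketch (not part of the upstream documentation; key and value
+// are hypothetical):
+//
+//	ctx2 := ctx.WithBaggageItem("user-id", "42")
+//	// ctx still holds the old baggage, ctx2 carries the extra item.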
+func (c SpanContext) WithBaggageItem(key, value string) SpanContext { + var newBaggage map[string]string + if c.baggage == nil { + newBaggage = map[string]string{key: value} + } else { + newBaggage = make(map[string]string, len(c.baggage)+1) + for k, v := range c.baggage { + newBaggage[k] = v + } + newBaggage[key] = value + } + // Use positional parameters so the compiler will help catch new fields. + return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""} +} + +// isDebugIDContainerOnly returns true when the instance of the context is only +// used to return the debug/correlation ID from extract() method. This happens +// in the situation when "jaeger-debug-id" header is passed in the carrier to +// the extract() method, but the request otherwise has no span context in it. +// Previously this would've returned opentracing.ErrSpanContextNotFound from the +// extract method, but now it returns a dummy context with only debugID filled in. +// +// See JaegerDebugHeader in constants.go +// See textMapPropagator#Extract +func (c *SpanContext) isDebugIDContainerOnly() bool { + return !c.traceID.IsValid() && c.debugID != "" +} + +// ------- TraceID ------- + +func (t TraceID) String() string { + if t.High == 0 { + return fmt.Sprintf("%x", t.Low) + } + return fmt.Sprintf("%x%016x", t.High, t.Low) +} + +// TraceIDFromString creates a TraceID from a hexadecimal string +func TraceIDFromString(s string) (TraceID, error) { + var hi, lo uint64 + var err error + if len(s) > 32 { + return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s) + } else if len(s) > 16 { + hiLen := len(s) - 16 + if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil { + return TraceID{}, err + } + if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil { + return TraceID{}, err + } + } else { + if lo, err = strconv.ParseUint(s, 16, 64); err != nil { + return TraceID{}, err + } + } + return TraceID{High: hi, Low: lo}, nil +} + +// IsValid checks if the trace ID is valid, i.e. not zero. +func (t TraceID) IsValid() bool { + return t.High != 0 || t.Low != 0 +} + +// ------- SpanID ------- + +func (s SpanID) String() string { + return fmt.Sprintf("%x", uint64(s)) +} + +// SpanIDFromString creates a SpanID from a hexadecimal string +func SpanIDFromString(s string) (SpanID, error) { + if len(s) > 16 { + return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s) + } + id, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return SpanID(0), err + } + return SpanID(id), nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go new file mode 100644 index 000000000000..4ce1881f3b83 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go @@ -0,0 +1,56 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package jaeger
+
+import (
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
+type ContribObserver interface {
+ // Create and return a span observer. Called when a span starts.
+ // If the Observer is not interested in the given span, it must return (nil, false).
+ // E.g.:
+ // func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+ // var sp opentracing.Span
+ // sso := opentracing.StartSpanOptions{}
+ // if spanObserver, ok := Observer.OnStartSpan(span, opName, sso); ok {
+ // // we have a valid SpanObserver
+ // }
+ // ...
+ // }
+ OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
+}
+
+// ContribSpanObserver is created by the Observer and receives notifications
+// about other Span events. This interface is meant to match
+// github.com/opentracing-contrib/go-observer, via duck typing, without
+// directly importing the go-observer package.
+type ContribSpanObserver interface {
+ OnSetOperationName(operationName string)
+ OnSetTag(key string, value interface{})
+ OnFinish(options opentracing.FinishOptions)
+}
+
+// wrapper observer for the old observers (see observer.go)
+type oldObserver struct {
+ obs Observer
+}
+
+func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
+ spanObserver := o.obs.OnStartSpan(operationName, options)
+ return spanObserver, spanObserver != nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
new file mode 100644
index 000000000000..4f5549033d5d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
+It currently uses a Zipkin-compatible data model and can be directly
+integrated with a Zipkin backend (http://zipkin.io).
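+
+A minimal initialization sketch (illustrative only; the service name is
+hypothetical and the full set of options lives in the config sub-package
+added in this change):
+
+	cfg := config.Configuration{ServiceName: "my-service"}
+	tracer, closer, err := cfg.NewTracer()
+	if err != nil {
+		// handle initialization error
+	}
+	defer closer.Close()
+	opentracing.SetGlobalTracer(tracer)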
+ +For integration instructions please refer to the README: + +https://github.com/uber/jaeger-client-go/blob/master/README.md +*/ +package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock new file mode 100644 index 000000000000..d76b1536171f --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/glide.lock @@ -0,0 +1,89 @@ +hash: 3accf84f97bff4a91162736104c0e9b9790820712bd86db6fec5e665f7196a82 +updated: 2018-04-30T11:46:43.804556-04:00 +imports: +- name: github.com/beorn7/perks + version: 3a771d992973f24aa725d07868b467d1ddfceafb + subpackages: + - quantile +- name: github.com/codahale/hdrhistogram + version: 3a0bb77429bd3a61596f5e8a3172445844342120 +- name: github.com/crossdock/crossdock-go + version: 049aabb0122b03bc9bd30cab8f3f91fb60166361 + subpackages: + - assert + - require +- name: github.com/davecgh/go-spew + version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73 + subpackages: + - spew +- name: github.com/golang/protobuf + version: bbd03ef6da3a115852eaf24c8a1c46aeb39aa175 + subpackages: + - proto +- name: github.com/matttproud/golang_protobuf_extensions + version: c12348ce28de40eed0136aa2b644d0ee0650e56c + subpackages: + - pbutil +- name: github.com/opentracing/opentracing-go + version: 1949ddbfd147afd4d964a9f00b24eb291e0e7c38 + subpackages: + - ext + - log +- name: github.com/pkg/errors + version: 645ef00459ed84a119197bfb8d8205042c6df63d +- name: github.com/pmezard/go-difflib + version: 792786c7400a136282c1664665ae0a8db921c6c2 + subpackages: + - difflib +- name: github.com/prometheus/client_golang + version: c5b7fccd204277076155f10851dad72b76a49317 + subpackages: + - prometheus +- name: github.com/prometheus/client_model + version: 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c + subpackages: + - go +- name: github.com/prometheus/common + version: 38c53a9f4bfcd932d1b00bfc65e256a7fba6b37a + subpackages: + - expfmt + - internal/bitbucket.org/ww/goautoneg + - model +- name: github.com/prometheus/procfs + version: 780932d4fbbe0e69b84c34c20f5c8d0981e109ea + subpackages: + - internal/util + - nfs + - xfs +- name: github.com/stretchr/testify + version: 12b6f73e6084dad08a7c6e575284b177ecafbc71 + subpackages: + - assert + - require + - suite +- name: github.com/uber/jaeger-lib + version: 4267858c0679cd4e47cefed8d7f70fd386cfb567 + subpackages: + - metrics + - metrics/prometheus + - metrics/testutils +- name: go.uber.org/atomic + version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8 +- name: go.uber.org/multierr + version: 3c4937480c32f4c13a875a1829af76c98ca3d40a +- name: go.uber.org/zap + version: eeedf312bc6c57391d84767a4cd413f02a917974 + subpackages: + - buffer + - internal/bufferpool + - internal/color + - internal/exit + - zapcore +- name: golang.org/x/net + version: 6078986fec03a1dcc236c34816c71b0e05018fda + subpackages: + - context + - context/ctxhttp +testImports: +- name: github.com/uber-go/atomic + version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8 diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml new file mode 100644 index 000000000000..6637da215264 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/glide.yaml @@ -0,0 +1,22 @@ +package: github.com/uber/jaeger-client-go +import: +- package: github.com/opentracing/opentracing-go + version: ^1 + 
subpackages: + - ext + - log +- package: github.com/crossdock/crossdock-go +- package: github.com/uber/jaeger-lib + version: ^1.2.1 + subpackages: + - metrics +- package: github.com/pkg/errors + version: ~0.8.0 +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert + - require + - suite +- package: github.com/prometheus/client_golang + version: v0.8.0 diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go new file mode 100644 index 000000000000..19c2c055b813 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/header.go @@ -0,0 +1,64 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +// HeadersConfig contains the values for the header keys that Jaeger will use. +// These values may be either custom or default depending on whether custom +// values were provided via a configuration. +type HeadersConfig struct { + // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which, + // if found in the carrier, forces the trace to be sampled as "debug" trace. + // The value of the header is recorded as the tag on the root span, so that the + // trace can be found in the UI using this value as a correlation ID. + JaegerDebugHeader string `yaml:"jaegerDebugHeader"` + + // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage. + // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where + // a root span does not exist. + JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"` + + // TraceContextHeaderName is the http header name used to propagate tracing context. + // This must be in lower-case to avoid mismatches when decoding incoming headers. + TraceContextHeaderName string `yaml:"TraceContextHeaderName"` + + // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage. + // This must be in lower-case to avoid mismatches when decoding incoming headers. 
+ TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"` +} + +func (c *HeadersConfig) applyDefaults() *HeadersConfig { + if c.JaegerBaggageHeader == "" { + c.JaegerBaggageHeader = JaegerBaggageHeader + } + if c.JaegerDebugHeader == "" { + c.JaegerDebugHeader = JaegerDebugHeader + } + if c.TraceBaggageHeaderPrefix == "" { + c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix + } + if c.TraceContextHeaderName == "" { + c.TraceContextHeaderName = TraceContextHeaderName + } + return c +} + +func getDefaultHeadersConfig() *HeadersConfig { + return &HeadersConfig{ + JaegerDebugHeader: JaegerDebugHeader, + JaegerBaggageHeader: JaegerBaggageHeader, + TraceContextHeaderName: TraceContextHeaderName, + TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix, + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go new file mode 100644 index 000000000000..745729319ff0 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go @@ -0,0 +1,101 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "time" + + "github.com/uber/jaeger-client-go" +) + +const ( + defaultMaxValueLength = 2048 + defaultRefreshInterval = time.Minute + defaultHostPort = "localhost:5778" +) + +// Option is a function that sets some option on the RestrictionManager +type Option func(options *options) + +// Options is a factory for all available options +var Options options + +type options struct { + denyBaggageOnInitializationFailure bool + metrics *jaeger.Metrics + logger jaeger.Logger + hostPort string + refreshInterval time.Duration +} + +// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager. +// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage +// restrictions have been retrieved from agent. +// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage +// restrictions have been retrieved from agent. +func (options) DenyBaggageOnInitializationFailure(b bool) Option { + return func(o *options) { + o.denyBaggageOnInitializationFailure = b + } +} + +// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics. +func (options) Metrics(m *jaeger.Metrics) Option { + return func(o *options) { + o.metrics = m + } +} + +// Logger creates an Option that sets the logger used by the RestrictionManager. +func (options) Logger(logger jaeger.Logger) Option { + return func(o *options) { + o.logger = logger + } +} + +// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions. 
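+// If this option is not supplied, applyOptions falls back to defaultHostPort
+// ("localhost:5778"). Illustrative only (hypothetical agent address):
+//
+//	remote.Options.HostPort("jaeger-agent:5778")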
+func (options) HostPort(hostPort string) Option { + return func(o *options) { + o.hostPort = hostPort + } +} + +// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for +// the baggage restrictions. +func (options) RefreshInterval(refreshInterval time.Duration) Option { + return func(o *options) { + o.refreshInterval = refreshInterval + } +} + +func applyOptions(o ...Option) options { + opts := options{} + for _, option := range o { + option(&opts) + } + if opts.metrics == nil { + opts.metrics = jaeger.NewNullMetrics() + } + if opts.logger == nil { + opts.logger = jaeger.NullLogger + } + if opts.hostPort == "" { + opts.hostPort = defaultHostPort + } + if opts.refreshInterval == 0 { + opts.refreshInterval = defaultRefreshInterval + } + return opts +} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go new file mode 100644 index 000000000000..a56515acab86 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go @@ -0,0 +1,157 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "fmt" + "net/url" + "sync" + "time" + + "github.com/uber/jaeger-client-go/internal/baggage" + thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage" + "github.com/uber/jaeger-client-go/utils" +) + +type httpBaggageRestrictionManagerProxy struct { + url string +} + +func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy { + v := url.Values{} + v.Set("service", serviceName) + return &httpBaggageRestrictionManagerProxy{ + url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()), + } +} + +func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(serviceName string) ([]*thrift.BaggageRestriction, error) { + var out []*thrift.BaggageRestriction + if err := utils.GetJSON(s.url, &out); err != nil { + return nil, err + } + return out, nil +} + +// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent +type RestrictionManager struct { + options + + mux sync.RWMutex + serviceName string + restrictions map[string]*baggage.Restriction + thriftProxy thrift.BaggageRestrictionManager + pollStopped sync.WaitGroup + stopPoll chan struct{} + invalidRestriction *baggage.Restriction + validRestriction *baggage.Restriction + + // Determines if the manager has successfully retrieved baggage restrictions from agent + initialized bool +} + +// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest +// baggage restrictions. 
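+//
+// An illustrative sketch (not part of the upstream documentation; the service
+// name and interval are hypothetical):
+//
+//	mgr := remote.NewRestrictionManager(
+//		"my-service",
+//		remote.Options.RefreshInterval(time.Minute),
+//		remote.Options.DenyBaggageOnInitializationFailure(true),
+//	)
+//	defer mgr.Close()
+//
+// config.NewTracer wires such a manager into the tracer via
+// jaeger.TracerOptions.BaggageRestrictionManager.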
+func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager { + // TODO there is a developing use case where a single tracer can generate traces on behalf of many services. + // restrictionsMap will need to exist per service + opts := applyOptions(options...) + m := &RestrictionManager{ + serviceName: serviceName, + options: opts, + restrictions: make(map[string]*baggage.Restriction), + thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName), + stopPoll: make(chan struct{}), + invalidRestriction: baggage.NewRestriction(false, 0), + validRestriction: baggage.NewRestriction(true, defaultMaxValueLength), + } + m.pollStopped.Add(1) + go m.pollManager() + return m +} + +// isReady returns true if the manager has retrieved baggage restrictions from the remote source. +func (m *RestrictionManager) isReady() bool { + m.mux.RLock() + defer m.mux.RUnlock() + return m.initialized +} + +// GetRestriction implements RestrictionManager#GetRestriction. +func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction { + m.mux.RLock() + defer m.mux.RUnlock() + if !m.initialized { + if m.denyBaggageOnInitializationFailure { + return m.invalidRestriction + } + return m.validRestriction + } + if restriction, ok := m.restrictions[key]; ok { + return restriction + } + return m.invalidRestriction +} + +// Close stops remote polling and closes the RemoteRestrictionManager. +func (m *RestrictionManager) Close() error { + close(m.stopPoll) + m.pollStopped.Wait() + return nil +} + +func (m *RestrictionManager) pollManager() { + defer m.pollStopped.Done() + // attempt to initialize baggage restrictions + if err := m.updateRestrictions(); err != nil { + m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error())) + } + ticker := time.NewTicker(m.refreshInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := m.updateRestrictions(); err != nil { + m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error())) + } + case <-m.stopPoll: + return + } + } +} + +func (m *RestrictionManager) updateRestrictions() error { + restrictions, err := m.thriftProxy.GetBaggageRestrictions(m.serviceName) + if err != nil { + m.metrics.BaggageRestrictionsUpdateFailure.Inc(1) + return err + } + newRestrictions := m.parseRestrictions(restrictions) + m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1) + m.mux.Lock() + defer m.mux.Unlock() + m.initialized = true + m.restrictions = newRestrictions + return nil +} + +func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction { + setters := make(map[string]*baggage.Restriction, len(restrictions)) + for _, restriction := range restrictions { + setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength)) + } + return setters +} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go new file mode 100644 index 000000000000..c16a5c566291 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go @@ -0,0 +1,71 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage + +const ( + defaultMaxValueLength = 2048 +) + +// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value. +type Restriction struct { + keyAllowed bool + maxValueLength int +} + +// NewRestriction returns a new Restriction. +func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction { + return &Restriction{ + keyAllowed: keyAllowed, + maxValueLength: maxValueLength, + } +} + +// KeyAllowed returns whether the baggage key for this restriction is allowed. +func (r *Restriction) KeyAllowed() bool { + return r.keyAllowed +} + +// MaxValueLength returns the max length for the baggage value. +func (r *Restriction) MaxValueLength() int { + return r.maxValueLength +} + +// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager +// will return a Restriction for a specific baggage key which will determine whether the baggage +// key is allowed for the current service and any other applicable restrictions on the baggage +// value. +type RestrictionManager interface { + GetRestriction(service, key string) *Restriction +} + +// DefaultRestrictionManager allows any baggage key. +type DefaultRestrictionManager struct { + defaultRestriction *Restriction +} + +// NewDefaultRestrictionManager returns a DefaultRestrictionManager. +func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager { + if maxValueLength == 0 { + maxValueLength = defaultMaxValueLength + } + return &DefaultRestrictionManager{ + defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength}, + } +} + +// GetRestriction implements RestrictionManager#GetRestriction. +func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction { + return m.defaultRestriction +} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go new file mode 100644 index 000000000000..0e10b8a5aa8e --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go @@ -0,0 +1,81 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spanlog + +import ( + "encoding/json" + "fmt" + + "github.com/opentracing/opentracing-go/log" +) + +type fieldsAsMap map[string]string + +// MaterializeWithJSON converts log Fields into JSON string +// TODO refactor into pluggable materializer +func MaterializeWithJSON(logFields []log.Field) ([]byte, error) { + fields := fieldsAsMap(make(map[string]string, len(logFields))) + for _, field := range logFields { + field.Marshal(fields) + } + if event, ok := fields["event"]; ok && len(fields) == 1 { + return []byte(event), nil + } + return json.Marshal(fields) +} + +func (ml fieldsAsMap) EmitString(key, value string) { + ml[key] = value +} + +func (ml fieldsAsMap) EmitBool(key string, value bool) { + ml[key] = fmt.Sprintf("%t", value) +} + +func (ml fieldsAsMap) EmitInt(key string, value int) { + ml[key] = fmt.Sprintf("%d", value) +} + +func (ml fieldsAsMap) EmitInt32(key string, value int32) { + ml[key] = fmt.Sprintf("%d", value) +} + +func (ml fieldsAsMap) EmitInt64(key string, value int64) { + ml[key] = fmt.Sprintf("%d", value) +} + +func (ml fieldsAsMap) EmitUint32(key string, value uint32) { + ml[key] = fmt.Sprintf("%d", value) +} + +func (ml fieldsAsMap) EmitUint64(key string, value uint64) { + ml[key] = fmt.Sprintf("%d", value) +} + +func (ml fieldsAsMap) EmitFloat32(key string, value float32) { + ml[key] = fmt.Sprintf("%f", value) +} + +func (ml fieldsAsMap) EmitFloat64(key string, value float64) { + ml[key] = fmt.Sprintf("%f", value) +} + +func (ml fieldsAsMap) EmitObject(key string, value interface{}) { + ml[key] = fmt.Sprintf("%+v", value) +} + +func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) { + value(ml) +} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go new file mode 100644 index 000000000000..f52c322fb69a --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go @@ -0,0 +1,99 @@ +// Copyright (c) 2018 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "time" + + "github.com/uber/jaeger-client-go" +) + +const ( + defaultHostPort = "localhost:5778" + defaultRefreshInterval = time.Second * 5 +) + +// Option is a function that sets some option on the Throttler +type Option func(options *options) + +// Options is a factory for all available options +var Options options + +type options struct { + metrics *jaeger.Metrics + logger jaeger.Logger + hostPort string + refreshInterval time.Duration + synchronousInitialization bool +} + +// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics. +func (options) Metrics(m *jaeger.Metrics) Option { + return func(o *options) { + o.metrics = m + } +} + +// Logger creates an Option that sets the logger used by the Throttler. 
+func (options) Logger(logger jaeger.Logger) Option { + return func(o *options) { + o.logger = logger + } +} + +// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits. +func (options) HostPort(hostPort string) Option { + return func(o *options) { + o.hostPort = hostPort + } +} + +// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for +// credits. +func (options) RefreshInterval(refreshInterval time.Duration) Option { + return func(o *options) { + o.refreshInterval = refreshInterval + } +} + +// SynchronousInitialization creates an Option that determines whether the throttler should synchronously +// fetch credits from the agent when an operation is seen for the first time. This should be set to true +// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront +// such that sampling or throttling occurs. +func (options) SynchronousInitialization(b bool) Option { + return func(o *options) { + o.synchronousInitialization = b + } +} + +func applyOptions(o ...Option) options { + opts := options{} + for _, option := range o { + option(&opts) + } + if opts.metrics == nil { + opts.metrics = jaeger.NewNullMetrics() + } + if opts.logger == nil { + opts.logger = jaeger.NullLogger + } + if opts.hostPort == "" { + opts.hostPort = defaultHostPort + } + if opts.refreshInterval == 0 { + opts.refreshInterval = defaultRefreshInterval + } + return opts +} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go new file mode 100644 index 000000000000..20f434fe4956 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go @@ -0,0 +1,216 @@ +// Copyright (c) 2018 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "fmt" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + + "github.com/uber/jaeger-client-go" + "github.com/uber/jaeger-client-go/utils" +) + +const ( + // minimumCredits is the minimum amount of credits necessary to not be throttled. + // i.e. if currentCredits > minimumCredits, then the operation will not be throttled. + minimumCredits = 1.0 +) + +var ( + errorUUIDNotSet = errors.New("Throttler UUID must be set") +) + +type operationBalance struct { + Operation string `json:"operation"` + Balance float64 `json:"balance"` +} + +type creditResponse struct { + Balances []operationBalance `json:"balances"` +} + +type httpCreditManagerProxy struct { + hostPort string +} + +func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy { + return &httpCreditManagerProxy{ + hostPort: hostPort, + } +} + +// N.B. Operations list must not be empty. 
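Taken together, the options above feed remote.NewThrottler (defined a little further down); a hedged construction sketch, with the refresh interval and operation name chosen arbitrarily and the same internal/ caveat applying.

package main

import (
	"time"

	"github.com/uber/jaeger-client-go/internal/throttler/remote"
)

func main() {
	t := remote.NewThrottler("example-service",
		remote.Options.RefreshInterval(10*time.Second),
		remote.Options.SynchronousInitialization(true),
	)
	defer t.Close()

	if t.IsAllowed("HTTP GET /users") {
		// proceed with starting the debug span
	}
}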
+func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) { + params := url.Values{} + params.Set("service", serviceName) + params.Set("uuid", uuid) + for _, op := range operations { + params.Add("operations", op) + } + var resp creditResponse + if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil { + return nil, errors.Wrap(err, "Failed to receive credits from agent") + } + return &resp, nil +} + +// Throttler retrieves credits from agent and uses it to throttle operations. +type Throttler struct { + options + + mux sync.RWMutex + service string + uuid atomic.Value + creditManager *httpCreditManagerProxy + credits map[string]float64 // map of operation->credits + close chan struct{} + stopped sync.WaitGroup +} + +// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle +// the service. +func NewThrottler(service string, options ...Option) *Throttler { + opts := applyOptions(options...) + creditManager := newHTTPCreditManagerProxy(opts.hostPort) + t := &Throttler{ + options: opts, + creditManager: creditManager, + service: service, + credits: make(map[string]float64), + close: make(chan struct{}), + } + t.stopped.Add(1) + go t.pollManager() + return t +} + +// IsAllowed implements Throttler#IsAllowed. +func (t *Throttler) IsAllowed(operation string) bool { + t.mux.Lock() + defer t.mux.Unlock() + value, ok := t.credits[operation] + if !ok || value == 0 { + if !ok { + // NOTE: This appears to be a no-op at first glance, but it stores + // the operation key in the map. Necessary for functionality of + // Throttler#operations method. + t.credits[operation] = 0 + } + if !t.synchronousInitialization { + t.metrics.ThrottledDebugSpans.Inc(1) + return false + } + // If it is the first time this operation is being checked, synchronously fetch + // the credits. + credits, err := t.fetchCredits([]string{operation}) + if err != nil { + // Failed to receive credits from agent, try again next time + t.logger.Error("Failed to fetch credits: " + err.Error()) + return false + } + if len(credits.Balances) == 0 { + // This shouldn't happen but just in case + return false + } + for _, opBalance := range credits.Balances { + t.credits[opBalance.Operation] += opBalance.Balance + } + } + return t.isAllowed(operation) +} + +// Close stops the throttler from fetching credits from remote. +func (t *Throttler) Close() error { + close(t.close) + t.stopped.Wait() + return nil +} + +// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote +// requests are made. +func (t *Throttler) SetProcess(process jaeger.Process) { + if process.UUID != "" { + t.uuid.Store(process.UUID) + } +} + +// N.B. 
This function must be called with the Write Lock +func (t *Throttler) isAllowed(operation string) bool { + credits := t.credits[operation] + if credits < minimumCredits { + t.metrics.ThrottledDebugSpans.Inc(1) + return false + } + t.credits[operation] = credits - minimumCredits + return true +} + +func (t *Throttler) pollManager() { + defer t.stopped.Done() + ticker := time.NewTicker(t.refreshInterval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + t.refreshCredits() + case <-t.close: + return + } + } +} + +func (t *Throttler) operations() []string { + t.mux.RLock() + defer t.mux.RUnlock() + operations := make([]string, 0, len(t.credits)) + for op := range t.credits { + operations = append(operations, op) + } + return operations +} + +func (t *Throttler) refreshCredits() { + operations := t.operations() + if len(operations) == 0 { + return + } + newCredits, err := t.fetchCredits(operations) + if err != nil { + t.metrics.ThrottlerUpdateFailure.Inc(1) + t.logger.Error("Failed to fetch credits: " + err.Error()) + return + } + t.metrics.ThrottlerUpdateSuccess.Inc(1) + + t.mux.Lock() + defer t.mux.Unlock() + for _, opBalance := range newCredits.Balances { + t.credits[opBalance.Operation] += opBalance.Balance + } +} + +func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) { + uuid := t.uuid.Load() + uuidStr, _ := uuid.(string) + if uuid == nil || uuidStr == "" { + return nil, errorUUIDNotSet + } + return t.creditManager.FetchCredits(uuidStr, t.service, operations) +} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go new file mode 100644 index 000000000000..196ed69cacae --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go @@ -0,0 +1,32 @@ +// Copyright (c) 2018 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package throttler + +// Throttler is used to rate limits operations. For example, given how debug spans +// are always sampled, a throttler can be enabled per client to rate limit the amount +// of debug spans a client can start. +type Throttler interface { + // IsAllowed determines whether the operation should be allowed and not be + // throttled. + IsAllowed(operation string) bool +} + +// DefaultThrottler doesn't throttle at all. +type DefaultThrottler struct{} + +// IsAllowed implements Throttler#IsAllowed. +func (t DefaultThrottler) IsAllowed(operation string) bool { + return true +} diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go new file mode 100644 index 000000000000..8402d087c29f --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/interop.go @@ -0,0 +1,55 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
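Since the throttler.Throttler interface above is the only contract the tracer relies on, a toy alternative implementation is easy to sketch (names invented; n must be positive).

package example

import "sync/atomic"

// everyNth is an illustrative Throttler that allows one out of every n calls,
// regardless of the operation name. It satisfies throttler.Throttler.
type everyNth struct {
	n     int64
	count int64
}

func (t *everyNth) IsAllowed(operation string) bool {
	return atomic.AddInt64(&t.count, 1)%t.n == 0
}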
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "github.com/opentracing/opentracing-go" +) + +// TODO this file should not be needed after TChannel PR. + +type formatKey int + +// SpanContextFormat is a constant used as OpenTracing Format. +// Requires *SpanContext as carrier. +// This format is intended for interop with TChannel or other Zipkin-like tracers. +const SpanContextFormat formatKey = iota + +type jaegerTraceContextPropagator struct { + tracer *Tracer +} + +func (p *jaegerTraceContextPropagator) Inject( + ctx SpanContext, + abstractCarrier interface{}, +) error { + carrier, ok := abstractCarrier.(*SpanContext) + if !ok { + return opentracing.ErrInvalidCarrier + } + + carrier.CopyFrom(&ctx) + return nil +} + +func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { + carrier, ok := abstractCarrier.(*SpanContext) + if !ok { + return emptyContext, opentracing.ErrInvalidCarrier + } + ctx := new(SpanContext) + ctx.CopyFrom(carrier) + return *ctx, nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go new file mode 100644 index 000000000000..868b2a5b5465 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go @@ -0,0 +1,84 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "fmt" + + "github.com/opentracing/opentracing-go/log" + + j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" +) + +type tags []*j.Tag + +// ConvertLogsToJaegerTags converts log Fields into jaeger tags. 
+func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag { + fields := tags(make([]*j.Tag, 0, len(logFields))) + for _, field := range logFields { + field.Marshal(&fields) + } + return fields +} + +func (t *tags) EmitString(key, value string) { + *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value}) +} + +func (t *tags) EmitBool(key string, value bool) { + *t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value}) +} + +func (t *tags) EmitInt(key string, value int) { + vLong := int64(value) + *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) +} + +func (t *tags) EmitInt32(key string, value int32) { + vLong := int64(value) + *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) +} + +func (t *tags) EmitInt64(key string, value int64) { + *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value}) +} + +func (t *tags) EmitUint32(key string, value uint32) { + vLong := int64(value) + *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) +} + +func (t *tags) EmitUint64(key string, value uint64) { + vLong := int64(value) + *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) +} + +func (t *tags) EmitFloat32(key string, value float32) { + vDouble := float64(value) + *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble}) +} + +func (t *tags) EmitFloat64(key string, value float64) { + *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value}) +} + +func (t *tags) EmitObject(key string, value interface{}) { + vStr := fmt.Sprintf("%+v", value) + *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr}) +} + +func (t *tags) EmitLazyLogger(value log.LazyLogger) { + value(t) +} diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go new file mode 100644 index 000000000000..6ce1caf873d9 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go @@ -0,0 +1,179 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "time" + + "github.com/opentracing/opentracing-go" + + j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" + "github.com/uber/jaeger-client-go/utils" +) + +// BuildJaegerThrift builds jaeger span based on internal span. 
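The tag conversion above is what turns span log fields into thrift Tag values; a small sketch of calling it directly, with arbitrary field names and values.

package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	jTags := jaeger.ConvertLogsToJaegerTags([]log.Field{
		log.String("event", "retry"),
		log.Float64("backoff.seconds", 0.25),
	})
	for _, tag := range jTags {
		fmt.Println(tag.Key, tag.VType)
	}
}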
+func BuildJaegerThrift(span *Span) *j.Span { + span.Lock() + defer span.Unlock() + startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime) + duration := span.duration.Nanoseconds() / int64(time.Microsecond) + jaegerSpan := &j.Span{ + TraceIdLow: int64(span.context.traceID.Low), + TraceIdHigh: int64(span.context.traceID.High), + SpanId: int64(span.context.spanID), + ParentSpanId: int64(span.context.parentID), + OperationName: span.operationName, + Flags: int32(span.context.flags), + StartTime: startTime, + Duration: duration, + Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength), + Logs: buildLogs(span.logs), + References: buildReferences(span.references), + } + return jaegerSpan +} + +// BuildJaegerProcessThrift creates a thrift Process type. +func BuildJaegerProcessThrift(span *Span) *j.Process { + span.Lock() + defer span.Unlock() + return buildJaegerProcessThrift(span.tracer) +} + +func buildJaegerProcessThrift(tracer *Tracer) *j.Process { + process := &j.Process{ + ServiceName: tracer.serviceName, + Tags: buildTags(tracer.tags, tracer.options.maxTagValueLength), + } + if tracer.process.UUID != "" { + process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING}) + } + return process +} + +func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag { + jTags := make([]*j.Tag, 0, len(tags)) + for _, tag := range tags { + jTag := buildTag(&tag, maxTagValueLength) + jTags = append(jTags, jTag) + } + return jTags +} + +func buildLogs(logs []opentracing.LogRecord) []*j.Log { + jLogs := make([]*j.Log, 0, len(logs)) + for _, log := range logs { + jLog := &j.Log{ + Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp), + Fields: ConvertLogsToJaegerTags(log.Fields), + } + jLogs = append(jLogs, jLog) + } + return jLogs +} + +func buildTag(tag *Tag, maxTagValueLength int) *j.Tag { + jTag := &j.Tag{Key: tag.key} + switch value := tag.value.(type) { + case string: + vStr := truncateString(value, maxTagValueLength) + jTag.VStr = &vStr + jTag.VType = j.TagType_STRING + case []byte: + if len(value) > maxTagValueLength { + value = value[:maxTagValueLength] + } + jTag.VBinary = value + jTag.VType = j.TagType_BINARY + case int: + vLong := int64(value) + jTag.VLong = &vLong + jTag.VType = j.TagType_LONG + case uint: + vLong := int64(value) + jTag.VLong = &vLong + jTag.VType = j.TagType_LONG + case int8: + vLong := int64(value) + jTag.VLong = &vLong + jTag.VType = j.TagType_LONG + case uint8: + vLong := int64(value) + jTag.VLong = &vLong + jTag.VType = j.TagType_LONG + case int16: + vLong := int64(value) + jTag.VLong = &vLong + jTag.VType = j.TagType_LONG + case uint16: + vLong := int64(value) + jTag.VLong = &vLong + jTag.VType = j.TagType_LONG + case int32: + vLong := int64(value) + jTag.VLong = &vLong + jTag.VType = j.TagType_LONG + case uint32: + vLong := int64(value) + jTag.VLong = &vLong + jTag.VType = j.TagType_LONG + case int64: + vLong := int64(value) + jTag.VLong = &vLong + jTag.VType = j.TagType_LONG + case uint64: + vLong := int64(value) + jTag.VLong = &vLong + jTag.VType = j.TagType_LONG + case float32: + vDouble := float64(value) + jTag.VDouble = &vDouble + jTag.VType = j.TagType_DOUBLE + case float64: + vDouble := float64(value) + jTag.VDouble = &vDouble + jTag.VType = j.TagType_DOUBLE + case bool: + vBool := value + jTag.VBool = &vBool + jTag.VType = j.TagType_BOOL + default: + vStr := truncateString(stringify(value), maxTagValueLength) + jTag.VStr = &vStr + jTag.VType = j.TagType_STRING + } + 
return jTag +} + +func buildReferences(references []Reference) []*j.SpanRef { + retMe := make([]*j.SpanRef, 0, len(references)) + for _, ref := range references { + if ref.Type == opentracing.ChildOfRef { + retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF)) + } else if ref.Type == opentracing.FollowsFromRef { + retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM)) + } + } + return retMe +} + +func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef { + return &j.SpanRef{ + RefType: refType, + TraceIdLow: int64(ctx.traceID.Low), + TraceIdHigh: int64(ctx.traceID.High), + SpanId: int64(ctx.spanID), + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger.go b/vendor/github.com/uber/jaeger-client-go/log/logger.go new file mode 100644 index 000000000000..894bb3dbf712 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/log/logger.go @@ -0,0 +1,90 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "bytes" + "fmt" + "log" + "sync" +) + +// Logger provides an abstract interface for logging from Reporters. +// Applications can provide their own implementation of this interface to adapt +// reporters logging to whatever logging library they prefer (stdlib log, +// logrus, go-logging, etc). +type Logger interface { + // Error logs a message at error priority + Error(msg string) + + // Infof logs a message at info priority + Infof(msg string, args ...interface{}) +} + +// StdLogger is implementation of the Logger interface that delegates to default `log` package +var StdLogger = &stdLogger{} + +type stdLogger struct{} + +func (l *stdLogger) Error(msg string) { + log.Printf("ERROR: %s", msg) +} + +// Infof logs a message at info priority +func (l *stdLogger) Infof(msg string, args ...interface{}) { + log.Printf(msg, args...) +} + +// NullLogger is implementation of the Logger interface that is no-op +var NullLogger = &nullLogger{} + +type nullLogger struct{} + +func (l *nullLogger) Error(msg string) {} +func (l *nullLogger) Infof(msg string, args ...interface{}) {} + +// BytesBufferLogger implements Logger backed by a bytes.Buffer. +type BytesBufferLogger struct { + mux sync.Mutex + buf bytes.Buffer +} + +// Error implements Logger. +func (l *BytesBufferLogger) Error(msg string) { + l.mux.Lock() + l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg)) + l.mux.Unlock() +} + +// Infof implements Logger. +func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) { + l.mux.Lock() + l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n") + l.mux.Unlock() +} + +// String returns string representation of the underlying buffer. +func (l *BytesBufferLogger) String() string { + l.mux.Lock() + defer l.mux.Unlock() + return l.buf.String() +} + +// Flush empties the underlying buffer. 
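The log helpers above are small but handy; BytesBufferLogger in particular makes reporter output easy to assert on in tests. A brief sketch with arbitrary messages:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/log"
)

func main() {
	var logger log.BytesBufferLogger
	logger.Error("connection refused")
	logger.Infof("retrying in %d ms", 100)
	fmt.Print(logger.String()) // "ERROR: connection refused" and "INFO: retrying in 100 ms" lines
	logger.Flush()
}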
+func (l *BytesBufferLogger) Flush() { + l.mux.Lock() + defer l.mux.Unlock() + l.buf.Reset() +} diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go new file mode 100644 index 000000000000..d4f0b501923d --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/logger.go @@ -0,0 +1,53 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import "log" + +// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead. + +// Logger provides an abstract interface for logging from Reporters. +// Applications can provide their own implementation of this interface to adapt +// reporters logging to whatever logging library they prefer (stdlib log, +// logrus, go-logging, etc). +type Logger interface { + // Error logs a message at error priority + Error(msg string) + + // Infof logs a message at info priority + Infof(msg string, args ...interface{}) +} + +// StdLogger is implementation of the Logger interface that delegates to default `log` package +var StdLogger = &stdLogger{} + +type stdLogger struct{} + +func (l *stdLogger) Error(msg string) { + log.Printf("ERROR: %s", msg) +} + +// Infof logs a message at info priority +func (l *stdLogger) Infof(msg string, args ...interface{}) { + log.Printf(msg, args...) +} + +// NullLogger is implementation of the Logger interface that delegates to default `log` package +var NullLogger = &nullLogger{} + +type nullLogger struct{} + +func (l *nullLogger) Error(msg string) {} +func (l *nullLogger) Infof(msg string, args ...interface{}) {} diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go new file mode 100644 index 000000000000..cadb2b9c0fb0 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/metrics.go @@ -0,0 +1,107 @@ +// Copyright (c) 2017-2018 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "github.com/uber/jaeger-lib/metrics" +) + +// Metrics is a container of all stats emitted by Jaeger tracer. 
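Before the metrics container below, note that the root-level Logger above mirrors the log package so older code keeps compiling; adapting a custom sink only needs the two methods. An illustrative sketch with invented names:

package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

// prefixLogger is an illustrative jaeger.Logger that prefixes every line.
type prefixLogger struct{ prefix string }

func (l prefixLogger) Error(msg string) { fmt.Println(l.prefix, "ERROR:", msg) }

func (l prefixLogger) Infof(msg string, args ...interface{}) {
	fmt.Printf(l.prefix+" "+msg+"\n", args...)
}

var _ jaeger.Logger = prefixLogger{}

func main() {
	var logger jaeger.Logger = prefixLogger{prefix: "[tracing]"}
	logger.Infof("sampler refreshed after %d s", 60)
}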
+type Metrics struct { + // Number of traces started by this tracer as sampled + TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y"` + + // Number of traces started by this tracer as not sampled + TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n"` + + // Number of externally started sampled traces this tracer joined + TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y"` + + // Number of externally started not-sampled traces this tracer joined + TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n"` + + // Number of sampled spans started by this tracer + SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y"` + + // Number of unsampled spans started by this tracer + SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n"` + + // Number of spans finished by this tracer + SpansFinished metrics.Counter `metric:"finished_spans"` + + // Number of errors decoding tracing context + DecodingErrors metrics.Counter `metric:"span_context_decoding_errors"` + + // Number of spans successfully reported + ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok"` + + // Number of spans not reported due to a Sender failure + ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err"` + + // Number of spans dropped due to internal queue overflow + ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped"` + + // Current number of spans in the reporter queue + ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length"` + + // Number of times the Sampler succeeded to retrieve sampling strategy + SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok"` + + // Number of times the Sampler failed to retrieve sampling strategy + SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err"` + + // Number of times the Sampler succeeded to retrieve and update sampling strategy + SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok"` + + // Number of times the Sampler failed to update sampling strategy + SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err"` + + // Number of times baggage was successfully written or updated on spans. + BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok"` + + // Number of times baggage failed to write or update on spans. + BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err"` + + // Number of times baggage was truncated as per baggage restrictions. + BaggageTruncate metrics.Counter `metric:"baggage_truncations"` + + // Number of times baggage restrictions were successfully updated. + BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok"` + + // Number of times baggage restrictions failed to update. + BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err"` + + // Number of times debug spans were throttled. + ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans"` + + // Number of times throttler successfully updated. + ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok"` + + // Number of times throttler failed to update. 
+ ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err"` +} + +// NewMetrics creates a new Metrics struct and initializes it. +func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics { + m := &Metrics{} + // TODO the namespace "jaeger" should be configurable (e.g. in all-in-one "jaeger-client" would make more sense) + metrics.Init(m, factory.Namespace("jaeger", nil), globalTags) + return m +} + +// NewNullMetrics creates a new Metrics struct that won't report any metrics. +func NewNullMetrics() *Metrics { + return NewMetrics(metrics.NullFactory, nil) +} diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go new file mode 100644 index 000000000000..7bbd028897a9 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/observer.go @@ -0,0 +1,88 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import opentracing "github.com/opentracing/opentracing-go" + +// Observer can be registered with the Tracer to receive notifications about +// new Spans. +// +// Deprecated: use jaeger.ContribObserver instead. +type Observer interface { + OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver +} + +// SpanObserver is created by the Observer and receives notifications about +// other Span events. +// +// Deprecated: use jaeger.ContribSpanObserver instead. +type SpanObserver interface { + OnSetOperationName(operationName string) + OnSetTag(key string, value interface{}) + OnFinish(options opentracing.FinishOptions) +} + +// compositeObserver is a dispatcher to other observers +type compositeObserver struct { + observers []ContribObserver +} + +// compositeSpanObserver is a dispatcher to other span observers +type compositeSpanObserver struct { + observers []ContribSpanObserver +} + +// noopSpanObserver is used when there are no observers registered +// on the Tracer or none of them returns span observers from OnStartSpan. 
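NewMetrics above binds the tagged struct fields to a metrics factory; a minimal sketch using the no-op factory from jaeger-lib, with arbitrary tag values.

package main

import (
	"github.com/uber/jaeger-lib/metrics"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"host": "example-host"})
	m.SpansStartedSampled.Inc(1)
	m.ReporterQueueLength.Update(0)
}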
+var noopSpanObserver = &compositeSpanObserver{} + +func (o *compositeObserver) append(contribObserver ContribObserver) { + o.observers = append(o.observers, contribObserver) +} + +func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver { + var spanObservers []ContribSpanObserver + for _, obs := range o.observers { + spanObs, ok := obs.OnStartSpan(sp, operationName, options) + if ok { + if spanObservers == nil { + spanObservers = make([]ContribSpanObserver, 0, len(o.observers)) + } + spanObservers = append(spanObservers, spanObs) + } + } + if len(spanObservers) == 0 { + return noopSpanObserver + } + return &compositeSpanObserver{observers: spanObservers} +} + +func (o *compositeSpanObserver) OnSetOperationName(operationName string) { + for _, obs := range o.observers { + obs.OnSetOperationName(operationName) + } +} + +func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) { + for _, obs := range o.observers { + obs.OnSetTag(key, value) + } +} + +func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) { + for _, obs := range o.observers { + obs.OnFinish(options) + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/process.go b/vendor/github.com/uber/jaeger-client-go/process.go new file mode 100644 index 000000000000..30cbf99624c9 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/process.go @@ -0,0 +1,29 @@ +// Copyright (c) 2018 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +// Process holds process specific metadata that's relevant to this client. +type Process struct { + Service string + UUID string + Tags []Tag +} + +// ProcessSetter sets a process. This can be used by any class that requires +// the process to be set as part of initialization. +// See internal/throttler/remote/throttler.go for an example. +type ProcessSetter interface { + SetProcess(process Process) +} diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go new file mode 100644 index 000000000000..abca67a3c9b9 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/propagation.go @@ -0,0 +1,300 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
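Process and ProcessSetter above are the hook through which the remote throttler learns the client UUID; any component with the same need can implement the one-method interface. An illustrative sketch:

package example

import jaeger "github.com/uber/jaeger-client-go"

// uuidRecorder is an illustrative ProcessSetter that just remembers the client UUID.
type uuidRecorder struct{ uuid string }

func (r *uuidRecorder) SetProcess(p jaeger.Process) { r.uuid = p.UUID }

var _ jaeger.ProcessSetter = (*uuidRecorder)(nil)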
+ +package jaeger + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "log" + "net/url" + "strings" + "sync" + + opentracing "github.com/opentracing/opentracing-go" +) + +// Injector is responsible for injecting SpanContext instances in a manner suitable +// for propagation via a format-specific "carrier" object. Typically the +// injection will take place across an RPC boundary, but message queues and +// other IPC mechanisms are also reasonable places to use an Injector. +type Injector interface { + // Inject takes `SpanContext` and injects it into `carrier`. The actual type + // of `carrier` depends on the `format` passed to `Tracer.Inject()`. + // + // Implementations may return opentracing.ErrInvalidCarrier or any other + // implementation-specific error if injection fails. + Inject(ctx SpanContext, carrier interface{}) error +} + +// Extractor is responsible for extracting SpanContext instances from a +// format-specific "carrier" object. Typically the extraction will take place +// on the server side of an RPC boundary, but message queues and other IPC +// mechanisms are also reasonable places to use an Extractor. +type Extractor interface { + // Extract decodes a SpanContext instance from the given `carrier`, + // or (nil, opentracing.ErrSpanContextNotFound) if no context could + // be found in the `carrier`. + Extract(carrier interface{}) (SpanContext, error) +} + +type textMapPropagator struct { + headerKeys *HeadersConfig + metrics Metrics + encodeValue func(string) string + decodeValue func(string) string +} + +func newTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator { + return &textMapPropagator{ + headerKeys: headerKeys, + metrics: metrics, + encodeValue: func(val string) string { + return val + }, + decodeValue: func(val string) string { + return val + }, + } +} + +func newHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator { + return &textMapPropagator{ + headerKeys: headerKeys, + metrics: metrics, + encodeValue: func(val string) string { + return url.QueryEscape(val) + }, + decodeValue: func(val string) string { + // ignore decoding errors, cannot do anything about them + if v, err := url.QueryUnescape(val); err == nil { + return v + } + return val + }, + } +} + +type binaryPropagator struct { + tracer *Tracer + buffers sync.Pool +} + +func newBinaryPropagator(tracer *Tracer) *binaryPropagator { + return &binaryPropagator{ + tracer: tracer, + buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}, + } +} + +func (p *textMapPropagator) Inject( + sc SpanContext, + abstractCarrier interface{}, +) error { + textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter) + if !ok { + return opentracing.ErrInvalidCarrier + } + + // Do not encode the string with trace context to avoid accidental double-encoding + // if people are using opentracing < 0.10.0. Our colon-separated representation + // of the trace context is already safe for HTTP headers. 
+ textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String()) + for k, v := range sc.baggage { + safeKey := p.addBaggageKeyPrefix(k) + safeVal := p.encodeValue(v) + textMapWriter.Set(safeKey, safeVal) + } + return nil +} + +func (p *textMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { + textMapReader, ok := abstractCarrier.(opentracing.TextMapReader) + if !ok { + return emptyContext, opentracing.ErrInvalidCarrier + } + var ctx SpanContext + var baggage map[string]string + err := textMapReader.ForeachKey(func(rawKey, value string) error { + key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap + if key == p.headerKeys.TraceContextHeaderName { + var err error + safeVal := p.decodeValue(value) + if ctx, err = ContextFromString(safeVal); err != nil { + return err + } + } else if key == p.headerKeys.JaegerDebugHeader { + ctx.debugID = p.decodeValue(value) + } else if key == p.headerKeys.JaegerBaggageHeader { + if baggage == nil { + baggage = make(map[string]string) + } + for k, v := range p.parseCommaSeparatedMap(value) { + baggage[k] = v + } + } else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) { + if baggage == nil { + baggage = make(map[string]string) + } + safeKey := p.removeBaggageKeyPrefix(key) + safeVal := p.decodeValue(value) + baggage[safeKey] = safeVal + } + return nil + }) + if err != nil { + p.metrics.DecodingErrors.Inc(1) + return emptyContext, err + } + if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 { + return emptyContext, opentracing.ErrSpanContextNotFound + } + ctx.baggage = baggage + return ctx, nil +} + +func (p *binaryPropagator) Inject( + sc SpanContext, + abstractCarrier interface{}, +) error { + carrier, ok := abstractCarrier.(io.Writer) + if !ok { + return opentracing.ErrInvalidCarrier + } + + // Handle the tracer context + if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil { + return err + } + if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil { + return err + } + if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil { + return err + } + if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil { + return err + } + + // Handle the baggage items + if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil { + return err + } + for k, v := range sc.baggage { + if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil { + return err + } + io.WriteString(carrier, k) + if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil { + return err + } + io.WriteString(carrier, v) + } + + return nil +} + +func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { + carrier, ok := abstractCarrier.(io.Reader) + if !ok { + return emptyContext, opentracing.ErrInvalidCarrier + } + var ctx SpanContext + + if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil { + return emptyContext, opentracing.ErrSpanContextCorrupted + } + if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil { + return emptyContext, opentracing.ErrSpanContextCorrupted + } + if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil { + return emptyContext, opentracing.ErrSpanContextCorrupted + } + if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil { + return emptyContext, opentracing.ErrSpanContextCorrupted + } + + // Handle the baggage items + var numBaggage int32 + if err := 
binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil { + return emptyContext, opentracing.ErrSpanContextCorrupted + } + if iNumBaggage := int(numBaggage); iNumBaggage > 0 { + ctx.baggage = make(map[string]string, iNumBaggage) + buf := p.buffers.Get().(*bytes.Buffer) + defer p.buffers.Put(buf) + + var keyLen, valLen int32 + for i := 0; i < iNumBaggage; i++ { + if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil { + return emptyContext, opentracing.ErrSpanContextCorrupted + } + buf.Reset() + buf.Grow(int(keyLen)) + if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen { + return emptyContext, opentracing.ErrSpanContextCorrupted + } + key := buf.String() + + if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil { + return emptyContext, opentracing.ErrSpanContextCorrupted + } + buf.Reset() + buf.Grow(int(valLen)) + if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen { + return emptyContext, opentracing.ErrSpanContextCorrupted + } + ctx.baggage[key] = buf.String() + } + } + + return ctx, nil +} + +// Converts a comma separated key value pair list into a map +// e.g. key1=value1, key2=value2, key3 = value3 +// is converted to map[string]string { "key1" : "value1", +// "key2" : "value2", +// "key3" : "value3" } +func (p *textMapPropagator) parseCommaSeparatedMap(value string) map[string]string { + baggage := make(map[string]string) + value, err := url.QueryUnescape(value) + if err != nil { + log.Printf("Unable to unescape %s, %v", value, err) + return baggage + } + for _, kvpair := range strings.Split(value, ",") { + kv := strings.Split(strings.TrimSpace(kvpair), "=") + if len(kv) == 2 { + baggage[kv[0]] = kv[1] + } else { + log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader) + } + } + return baggage +} + +// Converts a baggage item key into an http header format, +// by prepending TraceBaggageHeaderPrefix and encoding the key string +func (p *textMapPropagator) addBaggageKeyPrefix(key string) string { + // TODO encodeBaggageKeyAsHeader add caching and escaping + return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key) +} + +func (p *textMapPropagator) removeBaggageKeyPrefix(key string) string { + // TODO decodeBaggageHeaderKey add caching and escaping + return key[len(p.headerKeys.TraceBaggageHeaderPrefix):] +} diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go new file mode 100644 index 000000000000..5646e78bb2a8 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/reference.go @@ -0,0 +1,23 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import "github.com/opentracing/opentracing-go" + +// Reference represents a causal reference to other Spans (via their SpanContext). 
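The propagators above are what back the standard opentracing Inject/Extract calls; a hedged round-trip sketch, assuming a Jaeger-backed tracer and an active span constructed elsewhere.

package example

import opentracing "github.com/opentracing/opentracing-go"

// roundTrip injects a span context into a plain TextMap carrier and extracts it back,
// the way an RPC client and server would across a request boundary.
func roundTrip(tracer opentracing.Tracer, span opentracing.Span) (opentracing.SpanContext, error) {
	carrier := opentracing.TextMapCarrier{}
	if err := tracer.Inject(span.Context(), opentracing.TextMap, carrier); err != nil {
		return nil, err
	}
	return tracer.Extract(opentracing.TextMap, carrier)
}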
+type Reference struct { + Type opentracing.SpanReferenceType + Context SpanContext +} diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go new file mode 100644 index 000000000000..fe6288c4b9e5 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/reporter.go @@ -0,0 +1,289 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/opentracing/opentracing-go" + + "github.com/uber/jaeger-client-go/log" +) + +// Reporter is called by the tracer when a span is completed to report the span to the tracing collector. +type Reporter interface { + // Report submits a new span to collectors, possibly asynchronously and/or with buffering. + Report(span *Span) + + // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory. + Close() +} + +// ------------------------------ + +type nullReporter struct{} + +// NewNullReporter creates a no-op reporter that ignores all reported spans. +func NewNullReporter() Reporter { + return &nullReporter{} +} + +// Report implements Report() method of Reporter by doing nothing. +func (r *nullReporter) Report(span *Span) { + // no-op +} + +// Close implements Close() method of Reporter by doing nothing. +func (r *nullReporter) Close() { + // no-op +} + +// ------------------------------ + +type loggingReporter struct { + logger Logger +} + +// NewLoggingReporter creates a reporter that logs all reported spans to provided logger. +func NewLoggingReporter(logger Logger) Reporter { + return &loggingReporter{logger} +} + +// Report implements Report() method of Reporter by logging the span to the logger. +func (r *loggingReporter) Report(span *Span) { + r.logger.Infof("Reporting span %+v", span) +} + +// Close implements Close() method of Reporter by doing nothing. +func (r *loggingReporter) Close() { + // no-op +} + +// ------------------------------ + +// InMemoryReporter is used for testing, and simply collects spans in memory. +type InMemoryReporter struct { + spans []opentracing.Span + lock sync.Mutex +} + +// NewInMemoryReporter creates a reporter that stores spans in memory. +// NOTE: the Tracer should be created with options.PoolSpans = false. +func NewInMemoryReporter() *InMemoryReporter { + return &InMemoryReporter{ + spans: make([]opentracing.Span, 0, 10), + } +} + +// Report implements Report() method of Reporter by storing the span in the buffer. +func (r *InMemoryReporter) Report(span *Span) { + r.lock.Lock() + r.spans = append(r.spans, span) + r.lock.Unlock() +} + +// Close implements Close() method of Reporter by doing nothing. +func (r *InMemoryReporter) Close() { + // no-op +} + +// SpansSubmitted returns the number of spans accumulated in the buffer. 
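InMemoryReporter above is the natural reporter for unit tests; a hedged sketch, with the tracer wiring that actually produces spans elided.

package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	reporter := jaeger.NewInMemoryReporter()
	// ... build a tracer with this reporter and finish some spans ...
	fmt.Println(reporter.SpansSubmitted(), len(reporter.GetSpans()))
	reporter.Reset()
}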
+func (r *InMemoryReporter) SpansSubmitted() int { + r.lock.Lock() + defer r.lock.Unlock() + return len(r.spans) +} + +// GetSpans returns accumulated spans as a copy of the buffer. +func (r *InMemoryReporter) GetSpans() []opentracing.Span { + r.lock.Lock() + defer r.lock.Unlock() + copied := make([]opentracing.Span, len(r.spans)) + copy(copied, r.spans) + return copied +} + +// Reset clears all accumulated spans. +func (r *InMemoryReporter) Reset() { + r.lock.Lock() + defer r.lock.Unlock() + r.spans = nil +} + +// ------------------------------ + +type compositeReporter struct { + reporters []Reporter +} + +// NewCompositeReporter creates a reporter that ignores all reported spans. +func NewCompositeReporter(reporters ...Reporter) Reporter { + return &compositeReporter{reporters: reporters} +} + +// Report implements Report() method of Reporter by delegating to each underlying reporter. +func (r *compositeReporter) Report(span *Span) { + for _, reporter := range r.reporters { + reporter.Report(span) + } +} + +// Close implements Close() method of Reporter by closing each underlying reporter. +func (r *compositeReporter) Close() { + for _, reporter := range r.reporters { + reporter.Close() + } +} + +// ------------- REMOTE REPORTER ----------------- + +type reporterQueueItemType int + +const ( + defaultQueueSize = 100 + defaultBufferFlushInterval = 1 * time.Second + + reporterQueueItemSpan reporterQueueItemType = iota + reporterQueueItemClose +) + +type reporterQueueItem struct { + itemType reporterQueueItemType + span *Span + close *sync.WaitGroup +} + +type remoteReporter struct { + // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. + // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq + queueLength int64 + closed int64 // 0 - not closed, 1 - closed + + reporterOptions + + sender Transport + queue chan reporterQueueItem +} + +// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender. +// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped). +// Periodically the transport buffer is flushed even if it hasn't reached max packet size. +// Calls to Close() block until all spans reported prior to the call to Close are flushed. +func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter { + options := reporterOptions{} + for _, option := range opts { + option(&options) + } + if options.bufferFlushInterval <= 0 { + options.bufferFlushInterval = defaultBufferFlushInterval + } + if options.logger == nil { + options.logger = log.NullLogger + } + if options.metrics == nil { + options.metrics = NewNullMetrics() + } + if options.queueSize <= 0 { + options.queueSize = defaultQueueSize + } + reporter := &remoteReporter{ + reporterOptions: options, + sender: sender, + queue: make(chan reporterQueueItem, options.queueSize), + } + go reporter.processQueue() + return reporter +} + +// Report implements Report() method of Reporter. +// It passes the span to a background go-routine for submission to Jaeger backend. +// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented. +// If Report() is called after the reporter has been Close()-ed, the additional spans will not be +// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly, +// because some of them may still be successfully added to the queue. 
+func (r *remoteReporter) Report(span *Span) { + select { + case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span}: + atomic.AddInt64(&r.queueLength, 1) + default: + r.metrics.ReporterDropped.Inc(1) + } +} + +// Close implements Close() method of Reporter by waiting for the queue to be drained. +func (r *remoteReporter) Close() { + if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped { + r.logger.Error("Repeated attempt to close the reporter is ignored") + return + } + r.sendCloseEvent() + r.sender.Close() +} + +func (r *remoteReporter) sendCloseEvent() { + wg := &sync.WaitGroup{} + wg.Add(1) + item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg} + + r.queue <- item // if the queue is full we will block until there is space + atomic.AddInt64(&r.queueLength, 1) + wg.Wait() +} + +// processQueue reads spans from the queue, converts them to Thrift, and stores them in an internal buffer. +// When the buffer length reaches batchSize, it is flushed by submitting the accumulated spans to Jaeger. +// Buffer also gets flushed automatically every batchFlushInterval seconds, just in case the tracer stopped +// reporting new spans. +func (r *remoteReporter) processQueue() { + // flush causes the Sender to flush its accumulated spans and clear the buffer + flush := func() { + if flushed, err := r.sender.Flush(); err != nil { + r.metrics.ReporterFailure.Inc(int64(flushed)) + r.logger.Error(fmt.Sprintf("error when flushing the buffer: %s", err.Error())) + } else if flushed > 0 { + r.metrics.ReporterSuccess.Inc(int64(flushed)) + } + } + + timer := time.NewTicker(r.bufferFlushInterval) + for { + select { + case <-timer.C: + flush() + case item := <-r.queue: + atomic.AddInt64(&r.queueLength, -1) + switch item.itemType { + case reporterQueueItemSpan: + span := item.span + if flushed, err := r.sender.Append(span); err != nil { + r.metrics.ReporterFailure.Inc(int64(flushed)) + r.logger.Error(fmt.Sprintf("error reporting span %q: %s", span.OperationName(), err.Error())) + } else if flushed > 0 { + r.metrics.ReporterSuccess.Inc(int64(flushed)) + // to reduce the number of gauge stats, we only emit queue length on flush + r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength)) + } + case reporterQueueItemClose: + timer.Stop() + flush() + item.close.Done() + return + } + } + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go new file mode 100644 index 000000000000..65012d7015dc --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/reporter_options.go @@ -0,0 +1,69 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "time" +) + +// ReporterOption is a function that sets some option on the reporter. 
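Putting the remote reporter together typically looks like the sketch below; NewUDPTransport is assumed from the package's transport code (not part of this diff), and the agent address, queue size and flush interval are illustrative.

package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// Assumed constructor from the package's UDP transport; 6831 is the conventional agent port.
	sender, err := jaeger.NewUDPTransport("localhost:6831", 0)
	if err != nil {
		panic(err)
	}
	reporter := jaeger.NewRemoteReporter(sender,
		jaeger.ReporterOptions.QueueSize(256),
		jaeger.ReporterOptions.BufferFlushInterval(2*time.Second),
		jaeger.ReporterOptions.Logger(jaeger.StdLogger),
	)
	defer reporter.Close()
}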
+type ReporterOption func(c *reporterOptions) + +// ReporterOptions is a factory for all available ReporterOption's +var ReporterOptions reporterOptions + +// reporterOptions control behavior of the reporter. +type reporterOptions struct { + // queueSize is the size of internal queue where reported spans are stored before they are processed in the background + queueSize int + // bufferFlushInterval is how often the buffer is force-flushed, even if it's not full + bufferFlushInterval time.Duration + // logger is used to log errors of span submissions + logger Logger + // metrics is used to record runtime stats + metrics *Metrics +} + +// QueueSize creates a ReporterOption that sets the size of the internal queue where +// spans are stored before they are processed. +func (reporterOptions) QueueSize(queueSize int) ReporterOption { + return func(r *reporterOptions) { + r.queueSize = queueSize + } +} + +// Metrics creates a ReporterOption that initializes Metrics in the reporter, +// which is used to record runtime statistics. +func (reporterOptions) Metrics(metrics *Metrics) ReporterOption { + return func(r *reporterOptions) { + r.metrics = metrics + } +} + +// BufferFlushInterval creates a ReporterOption that sets how often the queue +// is force-flushed. +func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption { + return func(r *reporterOptions) { + r.bufferFlushInterval = bufferFlushInterval + } +} + +// Logger creates a ReporterOption that initializes the logger used to log +// errors of span submissions. +func (reporterOptions) Logger(logger Logger) ReporterOption { + return func(r *reporterOptions) { + r.logger = logger + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md new file mode 100644 index 000000000000..879948e9c9cd --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md @@ -0,0 +1,5 @@ +An Observer that can be used to emit RPC metrics +================================================ + +It can be attached to the tracer during tracer construction. +See `ExampleObserver` function in [observer_test.go](./observer_test.go). diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go new file mode 100644 index 000000000000..51aa11b350e5 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go @@ -0,0 +1,16 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package rpcmetrics implements an Observer that can be used to emit RPC metrics. 
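Per the README above, the observer is attached when the tracer is constructed; a heavily hedged sketch, where NewObserver, DefaultNameNormalizer, NewTracer, NewConstSampler and the Observer tracer option are all assumed from parts of the client outside this diff.

package main

import (
	"github.com/uber/jaeger-lib/metrics"

	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/rpcmetrics"
)

func main() {
	observer := rpcmetrics.NewObserver(metrics.NullFactory, rpcmetrics.DefaultNameNormalizer)
	tracer, closer := jaeger.NewTracer("example-service",
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
		jaeger.TracerOptions.Observer(observer),
	)
	defer closer.Close()
	_ = tracer
}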
+package rpcmetrics diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go new file mode 100644 index 000000000000..30555243d0c0 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go @@ -0,0 +1,63 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcmetrics + +import "sync" + +// normalizedEndpoints is a cache for endpointName -> safeName mappings. +type normalizedEndpoints struct { + names map[string]string + maxSize int + defaultName string + normalizer NameNormalizer + mux sync.RWMutex +} + +func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints { + return &normalizedEndpoints{ + maxSize: maxSize, + normalizer: normalizer, + names: make(map[string]string, maxSize), + } +} + +// normalize looks up the name in the cache, if not found it uses normalizer +// to convert the name to a safe name. If called with more than maxSize unique +// names it returns "" for all other names beyond those already cached. +func (n *normalizedEndpoints) normalize(name string) string { + n.mux.RLock() + norm, ok := n.names[name] + l := len(n.names) + n.mux.RUnlock() + if ok { + return norm + } + if l >= n.maxSize { + return "" + } + return n.normalizeWithLock(name) +} + +func (n *normalizedEndpoints) normalizeWithLock(name string) string { + norm := n.normalizer.Normalize(name) + n.mux.Lock() + defer n.mux.Unlock() + // cache may have grown while we were not holding the lock + if len(n.names) >= n.maxSize { + return "" + } + n.names[name] = norm + return norm +} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go new file mode 100644 index 000000000000..ab8d74c29185 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go @@ -0,0 +1,124 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcmetrics + +import ( + "sync" + + "github.com/uber/jaeger-lib/metrics" +) + +const ( + otherEndpointsPlaceholder = "other" + endpointNameMetricTag = "endpoint" +) + +// Metrics is a collection of metrics for an endpoint describing +// throughput, success, errors, and performance. +type Metrics struct { + // RequestCountSuccess is a counter of the total number of successes. 
+ RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"` + + // RequestCountFailures is a counter of the number of times any failure has been observed. + RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"` + + // RequestLatencySuccess is a latency histogram of succesful requests. + RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"` + + // RequestLatencyFailures is a latency histogram of failed requests. + RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"` + + // HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299 + HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"` + + // HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399 + HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"` + + // HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499 + HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"` + + // HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599 + HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"` +} + +func (m *Metrics) recordHTTPStatusCode(statusCode uint16) { + if statusCode >= 200 && statusCode < 300 { + m.HTTPStatusCode2xx.Inc(1) + } else if statusCode >= 300 && statusCode < 400 { + m.HTTPStatusCode3xx.Inc(1) + } else if statusCode >= 400 && statusCode < 500 { + m.HTTPStatusCode4xx.Inc(1) + } else if statusCode >= 500 && statusCode < 600 { + m.HTTPStatusCode5xx.Inc(1) + } +} + +// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name. +// Only maxNumberOfEndpoints Metrics are stored, all other endpoint names are mapped +// to a generic endpoint name "other". +type MetricsByEndpoint struct { + metricsFactory metrics.Factory + endpoints *normalizedEndpoints + metricsByEndpoint map[string]*Metrics + mux sync.RWMutex +} + +func newMetricsByEndpoint( + metricsFactory metrics.Factory, + normalizer NameNormalizer, + maxNumberOfEndpoints int, +) *MetricsByEndpoint { + return &MetricsByEndpoint{ + metricsFactory: metricsFactory, + endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer), + metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other" + } +} + +func (m *MetricsByEndpoint) get(endpoint string) *Metrics { + safeName := m.endpoints.normalize(endpoint) + if safeName == "" { + safeName = otherEndpointsPlaceholder + } + m.mux.RLock() + met := m.metricsByEndpoint[safeName] + m.mux.RUnlock() + if met != nil { + return met + } + + return m.getWithWriteLock(safeName) +} + +// split to make easier to test +func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics { + m.mux.Lock() + defer m.mux.Unlock() + + // it is possible that the name has been already registered after we released + // the read lock and before we grabbed the write lock, so check for that. + if met, ok := m.metricsByEndpoint[safeName]; ok { + return met + } + + // it would be nice to create the struct before locking, since Init() is somewhat + // expensive, however some metrics backends (e.g. expvar) may not like duplicate metrics. 
+ met := &Metrics{} + tags := map[string]string{endpointNameMetricTag: safeName} + metrics.Init(met, m.metricsFactory, tags) + + m.metricsByEndpoint[safeName] = met + return met +} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go new file mode 100644 index 000000000000..148d84b3a1a6 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go @@ -0,0 +1,101 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcmetrics + +// NameNormalizer is used to convert the endpoint names to strings +// that can be safely used as tags in the metrics. +type NameNormalizer interface { + Normalize(name string) string +} + +// DefaultNameNormalizer converts endpoint names so that they contain only characters +// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'. +var DefaultNameNormalizer = &SimpleNameNormalizer{ + SafeSets: []SafeCharacterSet{ + &Range{From: 'a', To: 'z'}, + &Range{From: 'A', To: 'Z'}, + &Range{From: '0', To: '9'}, + &Char{'-'}, + &Char{'_'}, + &Char{'/'}, + &Char{'.'}, + }, + Replacement: '-', +} + +// SimpleNameNormalizer uses a set of safe character sets. +type SimpleNameNormalizer struct { + SafeSets []SafeCharacterSet + Replacement byte +} + +// SafeCharacterSet determines if the given character is "safe" +type SafeCharacterSet interface { + IsSafe(c byte) bool +} + +// Range implements SafeCharacterSet +type Range struct { + From, To byte +} + +// IsSafe implements SafeCharacterSet +func (r *Range) IsSafe(c byte) bool { + return c >= r.From && c <= r.To +} + +// Char implements SafeCharacterSet +type Char struct { + Val byte +} + +// IsSafe implements SafeCharacterSet +func (ch *Char) IsSafe(c byte) bool { + return c == ch.Val +} + +// Normalize checks each character in the string against SafeSets, +// and if it's not safe substitutes it with Replacement. +func (n *SimpleNameNormalizer) Normalize(name string) string { + var retMe []byte + nameBytes := []byte(name) + for i, b := range nameBytes { + if n.safeByte(b) { + if retMe != nil { + retMe[i] = b + } + } else { + if retMe == nil { + retMe = make([]byte, len(nameBytes)) + copy(retMe[0:i], nameBytes[0:i]) + } + retMe[i] = n.Replacement + } + } + if retMe == nil { + return name + } + return string(retMe) +} + +// safeByte checks if b against all safe charsets. +func (n *SimpleNameNormalizer) safeByte(b byte) bool { + for i := range n.SafeSets { + if n.SafeSets[i].IsSafe(b) { + return true + } + } + return false +} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go new file mode 100644 index 000000000000..eca5ff6f3b98 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go @@ -0,0 +1,171 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcmetrics + +import ( + "strconv" + "sync" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/uber/jaeger-lib/metrics" + + jaeger "github.com/uber/jaeger-client-go" +) + +const defaultMaxNumberOfEndpoints = 200 + +// Observer is an observer that can emit RPC metrics. +type Observer struct { + metricsByEndpoint *MetricsByEndpoint +} + +// NewObserver creates a new observer that can emit RPC metrics. +func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer { + return &Observer{ + metricsByEndpoint: newMetricsByEndpoint( + metricsFactory, + normalizer, + defaultMaxNumberOfEndpoints, + ), + } +} + +// OnStartSpan creates a new Observer for the span. +func (o *Observer) OnStartSpan( + operationName string, + options opentracing.StartSpanOptions, +) jaeger.SpanObserver { + return NewSpanObserver(o.metricsByEndpoint, operationName, options) +} + +// SpanKind identifies the span as inboud, outbound, or internal +type SpanKind int + +const ( + // Local span kind + Local SpanKind = iota + // Inbound span kind + Inbound + // Outbound span kind + Outbound +) + +// SpanObserver collects RPC metrics +type SpanObserver struct { + metricsByEndpoint *MetricsByEndpoint + operationName string + startTime time.Time + mux sync.Mutex + kind SpanKind + httpStatusCode uint16 + err bool +} + +// NewSpanObserver creates a new SpanObserver that can emit RPC metrics. +func NewSpanObserver( + metricsByEndpoint *MetricsByEndpoint, + operationName string, + options opentracing.StartSpanOptions, +) *SpanObserver { + so := &SpanObserver{ + metricsByEndpoint: metricsByEndpoint, + operationName: operationName, + startTime: options.StartTime, + } + for k, v := range options.Tags { + so.handleTagInLock(k, v) + } + return so +} + +// handleTags watches for special tags +// - SpanKind +// - HttpStatusCode +// - Error +func (so *SpanObserver) handleTagInLock(key string, value interface{}) { + if key == string(ext.SpanKind) { + if v, ok := value.(ext.SpanKindEnum); ok { + value = string(v) + } + if v, ok := value.(string); ok { + if v == string(ext.SpanKindRPCClientEnum) { + so.kind = Outbound + } else if v == string(ext.SpanKindRPCServerEnum) { + so.kind = Inbound + } + } + return + } + if key == string(ext.HTTPStatusCode) { + if v, ok := value.(uint16); ok { + so.httpStatusCode = v + } else if v, ok := value.(int); ok { + so.httpStatusCode = uint16(v) + } else if v, ok := value.(string); ok { + if vv, err := strconv.Atoi(v); err == nil { + so.httpStatusCode = uint16(vv) + } + } + return + } + if key == string(ext.Error) { + if v, ok := value.(bool); ok { + so.err = v + } else if v, ok := value.(string); ok { + if vv, err := strconv.ParseBool(v); err == nil { + so.err = vv + } + } + return + } +} + +// OnFinish emits the RPC metrics. It only has an effect when operation name +// is not blank, and the span kind is an RPC server. 
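As the package README above notes, the Observer is intended to be attached while the tracer is constructed. A hedged sketch, assuming the root-package NewTracer, NewConstSampler, NewNullReporter and TracerOptions.Observer helpers (defined elsewhere in this vendored library) and the no-op metrics factory from jaeger-lib:

```go
package main

import (
	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/rpcmetrics"
	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// NullFactory keeps the example self-contained; a real service would plug
	// in its own metrics backend here.
	observer := rpcmetrics.NewObserver(metrics.NullFactory, rpcmetrics.DefaultNameNormalizer)

	tracer, closer := jaeger.NewTracer(
		"my-service", // hypothetical service name
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
		jaeger.TracerOptions.Observer(observer), // RPC metrics are emitted when spans finish
	)
	defer closer.Close()
	_ = tracer
}
```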
+func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) { + so.mux.Lock() + defer so.mux.Unlock() + + if so.operationName == "" || so.kind != Inbound { + return + } + + mets := so.metricsByEndpoint.get(so.operationName) + latency := options.FinishTime.Sub(so.startTime) + if so.err { + mets.RequestCountFailures.Inc(1) + mets.RequestLatencyFailures.Record(latency) + } else { + mets.RequestCountSuccess.Inc(1) + mets.RequestLatencySuccess.Record(latency) + } + mets.recordHTTPStatusCode(so.httpStatusCode) +} + +// OnSetOperationName records new operation name. +func (so *SpanObserver) OnSetOperationName(operationName string) { + so.mux.Lock() + so.operationName = operationName + so.mux.Unlock() +} + +// OnSetTag implements SpanObserver +func (so *SpanObserver) OnSetTag(key string, value interface{}) { + so.mux.Lock() + so.handleTagInLock(key, value) + so.mux.Unlock() +} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go new file mode 100644 index 000000000000..e6a32b3837a6 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/sampler.go @@ -0,0 +1,556 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "fmt" + "math" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/uber/jaeger-client-go/log" + "github.com/uber/jaeger-client-go/thrift-gen/sampling" + "github.com/uber/jaeger-client-go/utils" +) + +const ( + defaultSamplingServerURL = "http://localhost:5778/sampling" + defaultSamplingRefreshInterval = time.Minute + defaultMaxOperations = 2000 +) + +// Sampler decides whether a new trace should be sampled or not. +type Sampler interface { + // IsSampled decides whether a trace with given `id` and `operation` + // should be sampled. This function will also return the tags that + // can be used to identify the type of sampling that was applied to + // the root span. Most simple samplers would return two tags, + // sampler.type and sampler.param, similar to those used in the Configuration + IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) + + // Close does a clean shutdown of the sampler, stopping any background + // go-routines it may have started. + Close() + + // Equal checks if the `other` sampler is functionally equivalent + // to this sampler. + // TODO remove this function. This function is used to determine if 2 samplers are equivalent + // which does not bode well with the adaptive sampler which has to create all the composite samplers + // for the comparison to occur. This is expensive to do if only one sampler has changed. + Equal(other Sampler) bool +} + +// ----------------------- + +// ConstSampler is a sampler that always makes the same decision. +type ConstSampler struct { + Decision bool + tags []Tag +} + +// NewConstSampler creates a ConstSampler. 
+func NewConstSampler(sample bool) Sampler { + tags := []Tag{ + {key: SamplerTypeTagKey, value: SamplerTypeConst}, + {key: SamplerParamTagKey, value: sample}, + } + return &ConstSampler{Decision: sample, tags: tags} +} + +// IsSampled implements IsSampled() of Sampler. +func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + return s.Decision, s.tags +} + +// Close implements Close() of Sampler. +func (s *ConstSampler) Close() { + // nothing to do +} + +// Equal implements Equal() of Sampler. +func (s *ConstSampler) Equal(other Sampler) bool { + if o, ok := other.(*ConstSampler); ok { + return s.Decision == o.Decision + } + return false +} + +// ----------------------- + +// ProbabilisticSampler is a sampler that randomly samples a certain percentage +// of traces. +type ProbabilisticSampler struct { + samplingRate float64 + samplingBoundary uint64 + tags []Tag +} + +const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff + +// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the +// samplingRate, in the range between 0.0 and 1.0. +// +// It relies on the fact that new trace IDs are 63bit random numbers themselves, thus making the sampling decision +// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63). +// TODO remove the error from this function for next major release +func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) { + if samplingRate < 0.0 || samplingRate > 1.0 { + return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate) + } + return newProbabilisticSampler(samplingRate), nil +} + +func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler { + samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0)) + tags := []Tag{ + {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic}, + {key: SamplerParamTagKey, value: samplingRate}, + } + return &ProbabilisticSampler{ + samplingRate: samplingRate, + samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate), + tags: tags, + } +} + +// SamplingRate returns the sampling probability this sampled was constructed with. +func (s *ProbabilisticSampler) SamplingRate() float64 { + return s.samplingRate +} + +// IsSampled implements IsSampled() of Sampler. +func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + return s.samplingBoundary >= id.Low, s.tags +} + +// Close implements Close() of Sampler. +func (s *ProbabilisticSampler) Close() { + // nothing to do +} + +// Equal implements Equal() of Sampler. +func (s *ProbabilisticSampler) Equal(other Sampler) bool { + if o, ok := other.(*ProbabilisticSampler); ok { + return s.samplingBoundary == o.samplingBoundary + } + return false +} + +// ----------------------- + +type rateLimitingSampler struct { + maxTracesPerSecond float64 + rateLimiter utils.RateLimiter + tags []Tag +} + +// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled +// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those +// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of +// sequential requests can be sampled each second. 
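The boundary arithmetic used by ProbabilisticSampler above can be checked in isolation; the snippet below is only an illustration with a made-up trace ID, not part of the library:

```go
package main

import "fmt"

func main() {
	// Same constant as in sampler.go above: the largest 63-bit value.
	const maxRandomNumber = ^(uint64(1) << 63)

	samplingRate := 0.001
	boundary := uint64(float64(maxRandomNumber) * samplingRate)

	traceIDLow := uint64(0x0010000000000000) // made-up 63-bit trace ID
	sampled := boundary >= traceIDLow        // the comparison IsSampled performs

	fmt.Printf("boundary=%d traceID=%d sampled=%v\n", boundary, traceIDLow, sampled)
}
```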
+func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler { + tags := []Tag{ + {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting}, + {key: SamplerParamTagKey, value: maxTracesPerSecond}, + } + return &rateLimitingSampler{ + maxTracesPerSecond: maxTracesPerSecond, + rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)), + tags: tags, + } +} + +// IsSampled implements IsSampled() of Sampler. +func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + return s.rateLimiter.CheckCredit(1.0), s.tags +} + +func (s *rateLimitingSampler) Close() { + // nothing to do +} + +func (s *rateLimitingSampler) Equal(other Sampler) bool { + if o, ok := other.(*rateLimitingSampler); ok { + return s.maxTracesPerSecond == o.maxTracesPerSecond + } + return false +} + +// ----------------------- + +// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and +// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that +// every operation is sampled at least once in a time interval defined by the lowerBound. ie a lowerBound +// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes. +// +// The probabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both +// samplers return true, the tags for probabilisticSampler will be used. +type GuaranteedThroughputProbabilisticSampler struct { + probabilisticSampler *ProbabilisticSampler + lowerBoundSampler Sampler + tags []Tag + samplingRate float64 + lowerBound float64 +} + +// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both +// probabilisticSampler and rateLimitingSampler. +func NewGuaranteedThroughputProbabilisticSampler( + lowerBound, samplingRate float64, +) (*GuaranteedThroughputProbabilisticSampler, error) { + return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil +} + +func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler { + s := &GuaranteedThroughputProbabilisticSampler{ + lowerBoundSampler: NewRateLimitingSampler(lowerBound), + lowerBound: lowerBound, + } + s.setProbabilisticSampler(samplingRate) + return s +} + +func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) { + if s.probabilisticSampler == nil || s.samplingRate != samplingRate { + s.probabilisticSampler = newProbabilisticSampler(samplingRate) + s.samplingRate = s.probabilisticSampler.SamplingRate() + s.tags = []Tag{ + {key: SamplerTypeTagKey, value: SamplerTypeLowerBound}, + {key: SamplerParamTagKey, value: s.samplingRate}, + } + } +} + +// IsSampled implements IsSampled() of Sampler. +func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled { + s.lowerBoundSampler.IsSampled(id, operation) + return true, tags + } + sampled, _ := s.lowerBoundSampler.IsSampled(id, operation) + return sampled, s.tags +} + +// Close implements Close() of Sampler. +func (s *GuaranteedThroughputProbabilisticSampler) Close() { + s.probabilisticSampler.Close() + s.lowerBoundSampler.Close() +} + +// Equal implements Equal() of Sampler. +func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool { + // NB The Equal() function is expensive and will be removed. 
See adaptiveSampler.Equal() for + // more information. + return false +} + +// this function should only be called while holding a Write lock +func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) { + s.setProbabilisticSampler(samplingRate) + if s.lowerBound != lowerBound { + s.lowerBoundSampler = NewRateLimitingSampler(lowerBound) + s.lowerBound = lowerBound + } +} + +// ----------------------- + +type adaptiveSampler struct { + sync.RWMutex + + samplers map[string]*GuaranteedThroughputProbabilisticSampler + defaultSampler *ProbabilisticSampler + lowerBound float64 + maxOperations int +} + +// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and +// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all +// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler. +func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) { + return newAdaptiveSampler(strategies, maxOperations), nil +} + +func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler { + samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler) + for _, strategy := range strategies.PerOperationStrategies { + sampler := newGuaranteedThroughputProbabilisticSampler( + strategies.DefaultLowerBoundTracesPerSecond, + strategy.ProbabilisticSampling.SamplingRate, + ) + samplers[strategy.Operation] = sampler + } + return &adaptiveSampler{ + samplers: samplers, + defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability), + lowerBound: strategies.DefaultLowerBoundTracesPerSecond, + maxOperations: maxOperations, + } +} + +func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + s.RLock() + sampler, ok := s.samplers[operation] + if ok { + defer s.RUnlock() + return sampler.IsSampled(id, operation) + } + s.RUnlock() + s.Lock() + defer s.Unlock() + + // Check if sampler has already been created + sampler, ok = s.samplers[operation] + if ok { + return sampler.IsSampled(id, operation) + } + // Store only up to maxOperations of unique ops. + if len(s.samplers) >= s.maxOperations { + return s.defaultSampler.IsSampled(id, operation) + } + newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate()) + s.samplers[operation] = newSampler + return newSampler.IsSampled(id, operation) +} + +func (s *adaptiveSampler) Close() { + s.Lock() + defer s.Unlock() + for _, sampler := range s.samplers { + sampler.Close() + } + s.defaultSampler.Close() +} + +func (s *adaptiveSampler) Equal(other Sampler) bool { + // NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple + // samplers which all need to be initialized before this function can be called for a comparison. + // Therefore, adaptiveSampler uses the update() function to only alter the samplers that need + // changing. Hence this function always returns false so that the update function can be called. + // Once the Equal() function is removed from the Sampler API, this will no longer be needed. 
+ return false +} + +func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) { + s.Lock() + defer s.Unlock() + for _, strategy := range strategies.PerOperationStrategies { + operation := strategy.Operation + samplingRate := strategy.ProbabilisticSampling.SamplingRate + lowerBound := strategies.DefaultLowerBoundTracesPerSecond + if sampler, ok := s.samplers[operation]; ok { + sampler.update(lowerBound, samplingRate) + } else { + sampler := newGuaranteedThroughputProbabilisticSampler( + lowerBound, + samplingRate, + ) + s.samplers[operation] = sampler + } + } + s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond + if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability { + s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability) + } +} + +// ----------------------- + +// RemotelyControlledSampler is a delegating sampler that polls a remote server +// for the appropriate sampling strategy, constructs a corresponding sampler and +// delegates to it for sampling decisions. +type RemotelyControlledSampler struct { + // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. + // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq + closed int64 // 0 - not closed, 1 - closed + + sync.RWMutex + samplerOptions + + serviceName string + manager sampling.SamplingManager + doneChan chan *sync.WaitGroup +} + +type httpSamplingManager struct { + serverURL string +} + +func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) { + var out sampling.SamplingStrategyResponse + v := url.Values{} + v.Set("service", serviceName) + if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil { + return nil, err + } + return &out, nil +} + +// NewRemotelyControlledSampler creates a sampler that periodically pulls +// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent). +func NewRemotelyControlledSampler( + serviceName string, + opts ...SamplerOption, +) *RemotelyControlledSampler { + options := applySamplerOptions(opts...) + sampler := &RemotelyControlledSampler{ + samplerOptions: options, + serviceName: serviceName, + manager: &httpSamplingManager{serverURL: options.samplingServerURL}, + doneChan: make(chan *sync.WaitGroup), + } + go sampler.pollController() + return sampler +} + +func applySamplerOptions(opts ...SamplerOption) samplerOptions { + options := samplerOptions{} + for _, option := range opts { + option(&options) + } + if options.sampler == nil { + options.sampler = newProbabilisticSampler(0.001) + } + if options.logger == nil { + options.logger = log.NullLogger + } + if options.maxOperations <= 0 { + options.maxOperations = defaultMaxOperations + } + if options.samplingServerURL == "" { + options.samplingServerURL = defaultSamplingServerURL + } + if options.metrics == nil { + options.metrics = NewNullMetrics() + } + if options.samplingRefreshInterval <= 0 { + options.samplingRefreshInterval = defaultSamplingRefreshInterval + } + return options +} + +// IsSampled implements IsSampled() of Sampler. +func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + s.RLock() + defer s.RUnlock() + return s.sampler.IsSampled(id, operation) +} + +// Close implements Close() of Sampler. 
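Putting the pieces together, a hedged construction sketch for the remotely controlled sampler defined above; the SamplerOption helpers are declared in sampler_options.go further down in this diff, and the URL simply repeats the library default:

```go
package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	sampler := jaeger.NewRemotelyControlledSampler(
		"my-service", // hypothetical service name
		jaeger.SamplerOptions.SamplingServerURL("http://localhost:5778/sampling"),
		jaeger.SamplerOptions.SamplingRefreshInterval(time.Minute),
		jaeger.SamplerOptions.InitialSampler(jaeger.NewConstSampler(true)), // used until the first strategy arrives
	)
	defer sampler.Close() // stops the background polling goroutine
}
```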
+func (s *RemotelyControlledSampler) Close() { + if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped { + s.logger.Error("Repeated attempt to close the sampler is ignored") + return + } + + var wg sync.WaitGroup + wg.Add(1) + s.doneChan <- &wg + wg.Wait() +} + +// Equal implements Equal() of Sampler. +func (s *RemotelyControlledSampler) Equal(other Sampler) bool { + // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for + // more information. + if o, ok := other.(*RemotelyControlledSampler); ok { + s.RLock() + o.RLock() + defer s.RUnlock() + defer o.RUnlock() + return s.sampler.Equal(o.sampler) + } + return false +} + +func (s *RemotelyControlledSampler) pollController() { + ticker := time.NewTicker(s.samplingRefreshInterval) + defer ticker.Stop() + s.pollControllerWithTicker(ticker) +} + +func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) { + for { + select { + case <-ticker.C: + s.updateSampler() + case wg := <-s.doneChan: + wg.Done() + return + } + } +} + +func (s *RemotelyControlledSampler) getSampler() Sampler { + s.Lock() + defer s.Unlock() + return s.sampler +} + +func (s *RemotelyControlledSampler) setSampler(sampler Sampler) { + s.Lock() + defer s.Unlock() + s.sampler = sampler +} + +func (s *RemotelyControlledSampler) updateSampler() { + res, err := s.manager.GetSamplingStrategy(s.serviceName) + if err != nil { + s.metrics.SamplerQueryFailure.Inc(1) + return + } + s.Lock() + defer s.Unlock() + + s.metrics.SamplerRetrieved.Inc(1) + if strategies := res.GetOperationSampling(); strategies != nil { + s.updateAdaptiveSampler(strategies) + } else { + err = s.updateRateLimitingOrProbabilisticSampler(res) + } + if err != nil { + s.metrics.SamplerUpdateFailure.Inc(1) + s.logger.Infof("Unable to handle sampling strategy response %+v. Got error: %v", res, err) + return + } + s.metrics.SamplerUpdated.Inc(1) +} + +// NB: this function should only be called while holding a Write lock +func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) { + if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok { + adaptiveSampler.update(strategies) + } else { + s.sampler = newAdaptiveSampler(strategies, s.maxOperations) + } +} + +// NB: this function should only be called while holding a Write lock +func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error { + var newSampler Sampler + if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil { + newSampler = newProbabilisticSampler(probabilistic.SamplingRate) + } else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil { + newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond)) + } else { + return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType()) + } + if !s.sampler.Equal(newSampler) { + s.sampler = newSampler + } + return nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_options.go new file mode 100644 index 000000000000..75d28a561190 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/sampler_options.go @@ -0,0 +1,81 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "time" +) + +// SamplerOption is a function that sets some option on the sampler +type SamplerOption func(options *samplerOptions) + +// SamplerOptions is a factory for all available SamplerOption's +var SamplerOptions samplerOptions + +type samplerOptions struct { + metrics *Metrics + maxOperations int + sampler Sampler + logger Logger + samplingServerURL string + samplingRefreshInterval time.Duration +} + +// Metrics creates a SamplerOption that initializes Metrics on the sampler, +// which is used to emit statistics. +func (samplerOptions) Metrics(m *Metrics) SamplerOption { + return func(o *samplerOptions) { + o.metrics = m + } +} + +// MaxOperations creates a SamplerOption that sets the maximum number of +// operations the sampler will keep track of. +func (samplerOptions) MaxOperations(maxOperations int) SamplerOption { + return func(o *samplerOptions) { + o.maxOperations = maxOperations + } +} + +// InitialSampler creates a SamplerOption that sets the initial sampler +// to use before a remote sampler is created and used. +func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption { + return func(o *samplerOptions) { + o.sampler = sampler + } +} + +// Logger creates a SamplerOption that sets the logger used by the sampler. +func (samplerOptions) Logger(logger Logger) SamplerOption { + return func(o *samplerOptions) { + o.logger = logger + } +} + +// SamplingServerURL creates a SamplerOption that sets the sampling server url +// of the local agent that contains the sampling strategies. +func (samplerOptions) SamplingServerURL(samplingServerURL string) SamplerOption { + return func(o *samplerOptions) { + o.samplingServerURL = samplingServerURL + } +} + +// SamplingRefreshInterval creates a SamplerOption that sets how often the +// sampler will poll local agent for the appropriate sampling strategy. +func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption { + return func(o *samplerOptions) { + o.samplingRefreshInterval = samplingRefreshInterval + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go new file mode 100644 index 000000000000..f0b497a90a4d --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/span.go @@ -0,0 +1,249 @@ +// Copyright (c) 2017-2018 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger + +import ( + "sync" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" +) + +// Span implements opentracing.Span +type Span struct { + sync.RWMutex + + tracer *Tracer + + context SpanContext + + // The name of the "operation" this span is an instance of. + // Known as a "span name" in some implementations. + operationName string + + // firstInProcess, if true, indicates that this span is the root of the (sub)tree + // of spans in the current process. In other words it's true for the root spans, + // and the ingress spans when the process joins another trace. + firstInProcess bool + + // startTime is the timestamp indicating when the span began, with microseconds precision. + startTime time.Time + + // duration returns duration of the span with microseconds precision. + // Zero value means duration is unknown. + duration time.Duration + + // tags attached to this span + tags []Tag + + // The span's "micro-log" + logs []opentracing.LogRecord + + // references for this span + references []Reference + + observer ContribSpanObserver +} + +// Tag is a simple key value wrapper. +// TODO deprecate in the next major release, use opentracing.Tag instead. +type Tag struct { + key string + value interface{} +} + +// SetOperationName sets or changes the operation name. +func (s *Span) SetOperationName(operationName string) opentracing.Span { + s.Lock() + defer s.Unlock() + if s.context.IsSampled() { + s.operationName = operationName + } + s.observer.OnSetOperationName(operationName) + return s +} + +// SetTag implements SetTag() of opentracing.Span +func (s *Span) SetTag(key string, value interface{}) opentracing.Span { + s.observer.OnSetTag(key, value) + if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) { + return s + } + s.Lock() + defer s.Unlock() + if s.context.IsSampled() { + s.setTagNoLocking(key, value) + } + return s +} + +func (s *Span) setTagNoLocking(key string, value interface{}) { + s.tags = append(s.tags, Tag{key: key, value: value}) +} + +// LogFields implements opentracing.Span API +func (s *Span) LogFields(fields ...log.Field) { + s.Lock() + defer s.Unlock() + if !s.context.IsSampled() { + return + } + s.logFieldsNoLocking(fields...) +} + +// this function should only be called while holding a Write lock +func (s *Span) logFieldsNoLocking(fields ...log.Field) { + lr := opentracing.LogRecord{ + Fields: fields, + Timestamp: time.Now(), + } + s.appendLog(lr) +} + +// LogKV implements opentracing.Span API +func (s *Span) LogKV(alternatingKeyValues ...interface{}) { + s.RLock() + sampled := s.context.IsSampled() + s.RUnlock() + if !sampled { + return + } + fields, err := log.InterleavedKVToFields(alternatingKeyValues...) + if err != nil { + s.LogFields(log.Error(err), log.String("function", "LogKV")) + return + } + s.LogFields(fields...) 
+} + +// LogEvent implements opentracing.Span API +func (s *Span) LogEvent(event string) { + s.Log(opentracing.LogData{Event: event}) +} + +// LogEventWithPayload implements opentracing.Span API +func (s *Span) LogEventWithPayload(event string, payload interface{}) { + s.Log(opentracing.LogData{Event: event, Payload: payload}) +} + +// Log implements opentracing.Span API +func (s *Span) Log(ld opentracing.LogData) { + s.Lock() + defer s.Unlock() + if s.context.IsSampled() { + if ld.Timestamp.IsZero() { + ld.Timestamp = s.tracer.timeNow() + } + s.appendLog(ld.ToLogRecord()) + } +} + +// this function should only be called while holding a Write lock +func (s *Span) appendLog(lr opentracing.LogRecord) { + // TODO add logic to limit number of logs per span (issue #46) + s.logs = append(s.logs, lr) +} + +// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext +func (s *Span) SetBaggageItem(key, value string) opentracing.Span { + s.Lock() + defer s.Unlock() + s.tracer.setBaggage(s, key, value) + return s +} + +// BaggageItem implements BaggageItem() of opentracing.SpanContext +func (s *Span) BaggageItem(key string) string { + s.RLock() + defer s.RUnlock() + return s.context.baggage[key] +} + +// Finish implements opentracing.Span API +func (s *Span) Finish() { + s.FinishWithOptions(opentracing.FinishOptions{}) +} + +// FinishWithOptions implements opentracing.Span API +func (s *Span) FinishWithOptions(options opentracing.FinishOptions) { + if options.FinishTime.IsZero() { + options.FinishTime = s.tracer.timeNow() + } + s.observer.OnFinish(options) + s.Lock() + if s.context.IsSampled() { + s.duration = options.FinishTime.Sub(s.startTime) + // Note: bulk logs are not subject to maxLogsPerSpan limit + if options.LogRecords != nil { + s.logs = append(s.logs, options.LogRecords...) + } + for _, ld := range options.BulkLogData { + s.logs = append(s.logs, ld.ToLogRecord()) + } + } + s.Unlock() + // call reportSpan even for non-sampled traces, to return span to the pool + s.tracer.reportSpan(s) +} + +// Context implements opentracing.Span API +func (s *Span) Context() opentracing.SpanContext { + s.Lock() + defer s.Unlock() + return s.context +} + +// Tracer implements opentracing.Span API +func (s *Span) Tracer() opentracing.Tracer { + return s.tracer +} + +func (s *Span) String() string { + s.RLock() + defer s.RUnlock() + return s.context.String() +} + +// OperationName allows retrieving current operation name. +func (s *Span) OperationName() string { + s.RLock() + defer s.RUnlock() + return s.operationName +} + +func (s *Span) serviceName() string { + return s.tracer.serviceName +} + +// setSamplingPriority returns true if the flag was updated successfully, false otherwise. 
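The sampling.priority tag handled by setSamplingPriority below is normally set through the opentracing ext helpers rather than written directly. A hedged usage sketch, using a no-op tracer purely so the snippet compiles on its own:

```go
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

func main() {
	// NoopTracer stands in for a real jaeger.Tracer for illustration only.
	span := opentracing.NoopTracer{}.StartSpan("example-operation")
	defer span.Finish()

	// Routed into setSamplingPriority: a non-zero value requests debug sampling
	// (honoured only if the tracer allows it), zero clears the sampled flag.
	ext.SamplingPriority.Set(span, 1)
}
```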
+func setSamplingPriority(s *Span, value interface{}) bool { + s.Lock() + defer s.Unlock() + val, ok := value.(uint16) + if !ok { + return false + } + if val == 0 { + s.context.flags = s.context.flags & (^flagSampled) + return true + } + if s.tracer.isDebugAllowed(s.operationName) { + s.context.flags = s.context.flags | flagDebug | flagSampled + return true + } + return false +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go new file mode 100644 index 000000000000..e48811c500af --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go @@ -0,0 +1,411 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package agent + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" + "github.com/uber/jaeger-client-go/thrift-gen/jaeger" + "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ + +type Agent interface { + // Parameters: + // - Spans + EmitZipkinBatch(spans []*zipkincore.Span) (err error) + // Parameters: + // - Batch + EmitBatch(batch *jaeger.Batch) (err error) +} + +type AgentClient struct { + Transport thrift.TTransport + ProtocolFactory thrift.TProtocolFactory + InputProtocol thrift.TProtocol + OutputProtocol thrift.TProtocol + SeqId int32 +} + +func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { + return &AgentClient{Transport: t, + ProtocolFactory: f, + InputProtocol: f.GetProtocol(t), + OutputProtocol: f.GetProtocol(t), + SeqId: 0, + } +} + +func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { + return &AgentClient{Transport: t, + ProtocolFactory: nil, + InputProtocol: iprot, + OutputProtocol: oprot, + SeqId: 0, + } +} + +// Parameters: +// - Spans +func (p *AgentClient) EmitZipkinBatch(spans []*zipkincore.Span) (err error) { + if err = p.sendEmitZipkinBatch(spans); err != nil { + return + } + return +} + +func (p *AgentClient) sendEmitZipkinBatch(spans []*zipkincore.Span) (err error) { + oprot := p.OutputProtocol + if oprot == nil { + oprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.OutputProtocol = oprot + } + p.SeqId++ + if err = oprot.WriteMessageBegin("emitZipkinBatch", thrift.ONEWAY, p.SeqId); err != nil { + return + } + args := AgentEmitZipkinBatchArgs{ + Spans: spans, + } + if err = args.Write(oprot); err != nil { + return + } + if err = oprot.WriteMessageEnd(); err != nil { + return + } + return oprot.Flush() +} + +// Parameters: +// - Batch +func (p *AgentClient) EmitBatch(batch *jaeger.Batch) (err error) { + if err = p.sendEmitBatch(batch); err != nil { + return + } + return +} + +func (p *AgentClient) sendEmitBatch(batch *jaeger.Batch) (err error) { + oprot := p.OutputProtocol + if oprot == nil { + oprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.OutputProtocol = oprot + } + p.SeqId++ + if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil { + return + } + args := AgentEmitBatchArgs{ + Batch: batch, + } + if err = args.Write(oprot); err != nil { + return + } + if err = oprot.WriteMessageEnd(); err != nil { + return + } + return oprot.Flush() +} + +type 
AgentProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Agent +} + +func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewAgentProcessor(handler Agent) *AgentProcessor { + + self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self0.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler} + self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} + return self0 +} + +func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x1.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, x1 + +} + +type agentProcessorEmitZipkinBatch struct { + handler Agent +} + +func (p *agentProcessorEmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitZipkinBatchArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + if err2 = p.handler.EmitZipkinBatch(args.Spans); err2 != nil { + return true, err2 + } + return true, nil +} + +type agentProcessorEmitBatch struct { + handler Agent +} + +func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitBatchArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + if err2 = p.handler.EmitBatch(args.Batch); err2 != nil { + return true, err2 + } + return true, nil +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Spans +type AgentEmitZipkinBatchArgs struct { + Spans []*zipkincore.Span `thrift:"spans,1" json:"spans"` +} + +func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs { + return &AgentEmitZipkinBatchArgs{} +} + +func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span { + return p.Spans +} +func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), 
err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*zipkincore.Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem2 := &zipkincore.Span{} + if err := _elem2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.Spans = append(p.Spans, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) + } + return err +} + +func (p *AgentEmitZipkinBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) +} + +// Attributes: +// - Batch +type AgentEmitBatchArgs struct { + Batch *jaeger.Batch `thrift:"batch,1" json:"batch"` +} + +func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { + return &AgentEmitBatchArgs{} +} + +var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch + +func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch { + if !p.IsSetBatch() { + return AgentEmitBatchArgs_Batch_DEFAULT + } + return p.Batch +} +func (p *AgentEmitBatchArgs) IsSetBatch() bool { + return p.Batch != nil +} + +func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error { + 
p.Batch = &jaeger.Batch{} + if err := p.Batch.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("emitBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) + } + if err := p.Batch.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) + } + return err +} + +func (p *AgentEmitBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go new file mode 100644 index 000000000000..aa9857bb82ac --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go @@ -0,0 +1,23 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package agent + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" + "github.com/uber/jaeger-client-go/thrift-gen/jaeger" + "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ + +func init() { +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go new file mode 100644 index 000000000000..9c28f11c1ac7 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go @@ -0,0 +1,21 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package agent + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" + "github.com/uber/jaeger-client-go/thrift-gen/jaeger" + "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ +var GoUnusedProtection__ int diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go new file mode 100644 index 000000000000..1f79c1255cb1 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go @@ -0,0 +1,435 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package baggage + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +type BaggageRestrictionManager interface { + // getBaggageRestrictions retrieves the baggage restrictions for a specific service. + // Usually, baggageRestrictions apply to all services however there may be situations + // where a baggageKey might only be allowed to be set by a specific service. + // + // Parameters: + // - ServiceName + GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) +} + +type BaggageRestrictionManagerClient struct { + Transport thrift.TTransport + ProtocolFactory thrift.TProtocolFactory + InputProtocol thrift.TProtocol + OutputProtocol thrift.TProtocol + SeqId int32 +} + +func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient { + return &BaggageRestrictionManagerClient{Transport: t, + ProtocolFactory: f, + InputProtocol: f.GetProtocol(t), + OutputProtocol: f.GetProtocol(t), + SeqId: 0, + } +} + +func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient { + return &BaggageRestrictionManagerClient{Transport: t, + ProtocolFactory: nil, + InputProtocol: iprot, + OutputProtocol: oprot, + SeqId: 0, + } +} + +// getBaggageRestrictions retrieves the baggage restrictions for a specific service. +// Usually, baggageRestrictions apply to all services however there may be situations +// where a baggageKey might only be allowed to be set by a specific service. 
+// +// Parameters: +// - ServiceName +func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) { + if err = p.sendGetBaggageRestrictions(serviceName); err != nil { + return + } + return p.recvGetBaggageRestrictions() +} + +func (p *BaggageRestrictionManagerClient) sendGetBaggageRestrictions(serviceName string) (err error) { + oprot := p.OutputProtocol + if oprot == nil { + oprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.OutputProtocol = oprot + } + p.SeqId++ + if err = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.CALL, p.SeqId); err != nil { + return + } + args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{ + ServiceName: serviceName, + } + if err = args.Write(oprot); err != nil { + return + } + if err = oprot.WriteMessageEnd(); err != nil { + return + } + return oprot.Flush() +} + +func (p *BaggageRestrictionManagerClient) recvGetBaggageRestrictions() (value []*BaggageRestriction, err error) { + iprot := p.InputProtocol + if iprot == nil { + iprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.InputProtocol = iprot + } + method, mTypeId, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return + } + if method != "getBaggageRestrictions" { + err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getBaggageRestrictions failed: wrong method name") + return + } + if p.SeqId != seqId { + err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getBaggageRestrictions failed: out of sequence response") + return + } + if mTypeId == thrift.EXCEPTION { + error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error1 error + error1, err = error0.Read(iprot) + if err != nil { + return + } + if err = iprot.ReadMessageEnd(); err != nil { + return + } + err = error1 + return + } + if mTypeId != thrift.REPLY { + err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getBaggageRestrictions failed: invalid message type") + return + } + result := BaggageRestrictionManagerGetBaggageRestrictionsResult{} + if err = result.Read(iprot); err != nil { + return + } + if err = iprot.ReadMessageEnd(); err != nil { + return + } + value = result.GetSuccess() + return +} + +type BaggageRestrictionManagerProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler BaggageRestrictionManager +} + +func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor { + + self2 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self2.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler} + return self2 +} + +func (p *BaggageRestrictionManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { 
+ return processor.Process(seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x3.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, x3 + +} + +type baggageRestrictionManagerProcessorGetBaggageRestrictions struct { + handler BaggageRestrictionManager +} + +func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, err + } + + iprot.ReadMessageEnd() + result := BaggageRestrictionManagerGetBaggageRestrictionsResult{} + var retval []*BaggageRestriction + var err2 error + if retval, err2 = p.handler.GetBaggageRestrictions(args.ServiceName); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: "+err2.Error()) + oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - ServiceName +type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct { + ServiceName string `thrift:"serviceName,1" json:"serviceName"` +} + +func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs { + return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{} +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string { + return p.ServiceName +} +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ServiceName = v + } + return nil +} 
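A minimal sketch of how application code might plug into this generated service: a hypothetical in-process handler satisfying the BaggageRestrictionManager interface, wired into the generated processor. The type name, the "user-id" key, and the value 64 are illustrative only, and the import path simply mirrors the vendored layout shown in this diff.

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/baggage"
)

// fixedRestrictionManager is a hypothetical handler that returns the same
// static restrictions for every service; it exists only to show the shape
// of the generated BaggageRestrictionManager interface.
type fixedRestrictionManager struct {
	restrictions []*baggage.BaggageRestriction
}

func (m *fixedRestrictionManager) GetBaggageRestrictions(serviceName string) ([]*baggage.BaggageRestriction, error) {
	// In this sketch the serviceName is ignored and the fixed list is returned.
	return m.restrictions, nil
}

func main() {
	handler := &fixedRestrictionManager{
		restrictions: []*baggage.BaggageRestriction{
			{BaggageKey: "user-id", MaxValueLength: 64},
		},
	}
	// NewBaggageRestrictionManagerProcessor turns the handler into a Thrift
	// processor whose map contains one entry, "getBaggageRestrictions",
	// which a Thrift server loop can dispatch to.
	processor := baggage.NewBaggageRestrictionManagerProcessor(handler)
	fmt.Println(len(processor.ProcessorMap())) // 1
}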
+ +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("getBaggageRestrictions_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) + } + if err := oprot.WriteString(string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) + } + return err +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p) +} + +// Attributes: +// - Success +type BaggageRestrictionManagerGetBaggageRestrictionsResult struct { + Success []*BaggageRestriction `thrift:"success,0" json:"success,omitempty"` +} + +func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult { + return &BaggageRestrictionManagerGetBaggageRestrictionsResult{} +} + +var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction { + return p.Success +} +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if err := p.readField0(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) readField0(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BaggageRestriction, 0, size) + p.Success = tSlice + for i := 0; i < size; i++ { + _elem4 := &BaggageRestriction{} + if err := _elem4.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Success = append(p.Success, _elem4) + } + if err := iprot.ReadListEnd(); err != nil 
{ + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("getBaggageRestrictions_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField0(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Success { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go new file mode 100644 index 000000000000..ed35ce9ab514 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go @@ -0,0 +1,18 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package baggage + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +func init() { +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go new file mode 100644 index 000000000000..7888892f633f --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go @@ -0,0 +1,154 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package baggage + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +var GoUnusedProtection__ int + +// Attributes: +// - BaggageKey +// - MaxValueLength +type BaggageRestriction struct { + BaggageKey string `thrift:"baggageKey,1,required" json:"baggageKey"` + MaxValueLength int32 `thrift:"maxValueLength,2,required" json:"maxValueLength"` +} + +func NewBaggageRestriction() *BaggageRestriction { + return &BaggageRestriction{} +} + +func (p *BaggageRestriction) GetBaggageKey() string { + return p.BaggageKey +} + +func (p *BaggageRestriction) GetMaxValueLength() int32 { + return p.MaxValueLength +} +func (p *BaggageRestriction) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetBaggageKey bool = false + var issetMaxValueLength bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetBaggageKey = true + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + issetMaxValueLength = true + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetBaggageKey { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set")) + } + if !issetMaxValueLength { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set")) + } + return nil +} + +func (p *BaggageRestriction) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.BaggageKey = v + } + return nil +} + +func (p *BaggageRestriction) readField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.MaxValueLength = v + } + return nil +} + +func (p *BaggageRestriction) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("BaggageRestriction"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BaggageRestriction) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("baggageKey", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) + } + if err := oprot.WriteString(string(p.BaggageKey)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) + } + 
return err +} + +func (p *BaggageRestriction) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("maxValueLength", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxValueLength)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) + } + return err +} + +func (p *BaggageRestriction) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BaggageRestriction(%+v)", *p) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go new file mode 100644 index 000000000000..b32c37dd2615 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go @@ -0,0 +1,242 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package jaeger + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +type Agent interface { + // Parameters: + // - Batch + EmitBatch(batch *Batch) (err error) +} + +type AgentClient struct { + Transport thrift.TTransport + ProtocolFactory thrift.TProtocolFactory + InputProtocol thrift.TProtocol + OutputProtocol thrift.TProtocol + SeqId int32 +} + +func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { + return &AgentClient{Transport: t, + ProtocolFactory: f, + InputProtocol: f.GetProtocol(t), + OutputProtocol: f.GetProtocol(t), + SeqId: 0, + } +} + +func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { + return &AgentClient{Transport: t, + ProtocolFactory: nil, + InputProtocol: iprot, + OutputProtocol: oprot, + SeqId: 0, + } +} + +// Parameters: +// - Batch +func (p *AgentClient) EmitBatch(batch *Batch) (err error) { + if err = p.sendEmitBatch(batch); err != nil { + return + } + return +} + +func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) { + oprot := p.OutputProtocol + if oprot == nil { + oprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.OutputProtocol = oprot + } + p.SeqId++ + if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil { + return + } + args := AgentEmitBatchArgs{ + Batch: batch, + } + if err = args.Write(oprot); err != nil { + return + } + if err = oprot.WriteMessageEnd(); err != nil { + return + } + return oprot.Flush() +} + +type AgentProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Agent +} + +func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewAgentProcessor(handler Agent) *AgentProcessor { + + self6 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + 
self6.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} + return self6 +} + +func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x7 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x7.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, x7 + +} + +type agentProcessorEmitBatch struct { + handler Agent +} + +func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitBatchArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + if err2 = p.handler.EmitBatch(args.Batch); err2 != nil { + return true, err2 + } + return true, nil +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Batch +type AgentEmitBatchArgs struct { + Batch *Batch `thrift:"batch,1" json:"batch"` +} + +func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { + return &AgentEmitBatchArgs{} +} + +var AgentEmitBatchArgs_Batch_DEFAULT *Batch + +func (p *AgentEmitBatchArgs) GetBatch() *Batch { + if !p.IsSetBatch() { + return AgentEmitBatchArgs_Batch_DEFAULT + } + return p.Batch +} +func (p *AgentEmitBatchArgs) IsSetBatch() bool { + return p.Batch != nil +} + +func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error { + p.Batch = &Batch{} + if err := p.Batch.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("emitBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) + } + if err := p.Batch.Write(oprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) + } + return err +} + +func (p *AgentEmitBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go new file mode 100644 index 000000000000..621b8b1c20f4 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go @@ -0,0 +1,18 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package jaeger + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +func init() { +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go new file mode 100644 index 000000000000..d23ed2fc2839 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go @@ -0,0 +1,1838 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package jaeger + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +var GoUnusedProtection__ int + +type TagType int64 + +const ( + TagType_STRING TagType = 0 + TagType_DOUBLE TagType = 1 + TagType_BOOL TagType = 2 + TagType_LONG TagType = 3 + TagType_BINARY TagType = 4 +) + +func (p TagType) String() string { + switch p { + case TagType_STRING: + return "STRING" + case TagType_DOUBLE: + return "DOUBLE" + case TagType_BOOL: + return "BOOL" + case TagType_LONG: + return "LONG" + case TagType_BINARY: + return "BINARY" + } + return "" +} + +func TagTypeFromString(s string) (TagType, error) { + switch s { + case "STRING": + return TagType_STRING, nil + case "DOUBLE": + return TagType_DOUBLE, nil + case "BOOL": + return TagType_BOOL, nil + case "LONG": + return TagType_LONG, nil + case "BINARY": + return TagType_BINARY, nil + } + return TagType(0), fmt.Errorf("not a valid TagType string") +} + +func TagTypePtr(v TagType) *TagType { return &v } + +func (p TagType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *TagType) UnmarshalText(text []byte) error { + q, err := TagTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +type SpanRefType int64 + +const ( + SpanRefType_CHILD_OF SpanRefType = 0 + SpanRefType_FOLLOWS_FROM SpanRefType = 1 +) + +func (p SpanRefType) String() string { + switch p { + case SpanRefType_CHILD_OF: + return "CHILD_OF" + case SpanRefType_FOLLOWS_FROM: + return "FOLLOWS_FROM" + } + return "" +} + +func SpanRefTypeFromString(s string) (SpanRefType, error) { + switch s { + case "CHILD_OF": + return SpanRefType_CHILD_OF, nil + case "FOLLOWS_FROM": + return SpanRefType_FOLLOWS_FROM, nil + } + return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string") +} + +func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v } + 
+func (p SpanRefType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *SpanRefType) UnmarshalText(text []byte) error { + q, err := SpanRefTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +// Attributes: +// - Key +// - VType +// - VStr +// - VDouble +// - VBool +// - VLong +// - VBinary +type Tag struct { + Key string `thrift:"key,1,required" json:"key"` + VType TagType `thrift:"vType,2,required" json:"vType"` + VStr *string `thrift:"vStr,3" json:"vStr,omitempty"` + VDouble *float64 `thrift:"vDouble,4" json:"vDouble,omitempty"` + VBool *bool `thrift:"vBool,5" json:"vBool,omitempty"` + VLong *int64 `thrift:"vLong,6" json:"vLong,omitempty"` + VBinary []byte `thrift:"vBinary,7" json:"vBinary,omitempty"` +} + +func NewTag() *Tag { + return &Tag{} +} + +func (p *Tag) GetKey() string { + return p.Key +} + +func (p *Tag) GetVType() TagType { + return p.VType +} + +var Tag_VStr_DEFAULT string + +func (p *Tag) GetVStr() string { + if !p.IsSetVStr() { + return Tag_VStr_DEFAULT + } + return *p.VStr +} + +var Tag_VDouble_DEFAULT float64 + +func (p *Tag) GetVDouble() float64 { + if !p.IsSetVDouble() { + return Tag_VDouble_DEFAULT + } + return *p.VDouble +} + +var Tag_VBool_DEFAULT bool + +func (p *Tag) GetVBool() bool { + if !p.IsSetVBool() { + return Tag_VBool_DEFAULT + } + return *p.VBool +} + +var Tag_VLong_DEFAULT int64 + +func (p *Tag) GetVLong() int64 { + if !p.IsSetVLong() { + return Tag_VLong_DEFAULT + } + return *p.VLong +} + +var Tag_VBinary_DEFAULT []byte + +func (p *Tag) GetVBinary() []byte { + return p.VBinary +} +func (p *Tag) IsSetVStr() bool { + return p.VStr != nil +} + +func (p *Tag) IsSetVDouble() bool { + return p.VDouble != nil +} + +func (p *Tag) IsSetVBool() bool { + return p.VBool != nil +} + +func (p *Tag) IsSetVLong() bool { + return p.VLong != nil +} + +func (p *Tag) IsSetVBinary() bool { + return p.VBinary != nil +} + +func (p *Tag) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetKey bool = false + var issetVType bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetKey = true + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + issetVType = true + case 3: + if err := p.readField3(iprot); err != nil { + return err + } + case 4: + if err := p.readField4(iprot); err != nil { + return err + } + case 5: + if err := p.readField5(iprot); err != nil { + return err + } + case 6: + if err := p.readField6(iprot); err != nil { + return err + } + case 7: + if err := p.readField7(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetKey { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set")) + } + if !issetVType { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set")) + } + return nil +} + +func (p *Tag) 
readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *Tag) readField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := TagType(v) + p.VType = temp + } + return nil +} + +func (p *Tag) readField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.VStr = &v + } + return nil +} + +func (p *Tag) readField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.VDouble = &v + } + return nil +} + +func (p *Tag) readField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.VBool = &v + } + return nil +} + +func (p *Tag) readField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.VLong = &v + } + return nil +} + +func (p *Tag) readField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.VBinary = v + } + return nil +} + +func (p *Tag) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Tag"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Tag) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *Tag) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("vType", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) + } + if err := oprot.WriteI32(int32(p.VType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) + } + return err +} + +func (p *Tag) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetVStr() { + 
if err := oprot.WriteFieldBegin("vStr", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) + } + if err := oprot.WriteString(string(*p.VStr)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) + } + } + return err +} + +func (p *Tag) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetVDouble() { + if err := oprot.WriteFieldBegin("vDouble", thrift.DOUBLE, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) + } + if err := oprot.WriteDouble(float64(*p.VDouble)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) + } + } + return err +} + +func (p *Tag) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetVBool() { + if err := oprot.WriteFieldBegin("vBool", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) + } + if err := oprot.WriteBool(bool(*p.VBool)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) + } + } + return err +} + +func (p *Tag) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetVLong() { + if err := oprot.WriteFieldBegin("vLong", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) + } + if err := oprot.WriteI64(int64(*p.VLong)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) + } + } + return err +} + +func (p *Tag) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetVBinary() { + if err := oprot.WriteFieldBegin("vBinary", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) + } + if err := oprot.WriteBinary(p.VBinary); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) + } + } + return err +} + +func (p *Tag) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Tag(%+v)", *p) +} + +// Attributes: +// - Timestamp +// - Fields +type Log struct { + Timestamp int64 `thrift:"timestamp,1,required" json:"timestamp"` + Fields []*Tag `thrift:"fields,2,required" json:"fields"` +} + +func NewLog() *Log { + return &Log{} +} + +func (p *Log) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Log) GetFields() []*Tag { + return p.Fields +} +func (p *Log) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetTimestamp bool = false + var issetFields bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { 
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetTimestamp = true + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + issetFields = true + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetTimestamp { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set")) + } + if !issetFields { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set")) + } + return nil +} + +func (p *Log) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Log) readField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Fields = tSlice + for i := 0; i < size; i++ { + _elem0 := &Tag{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Fields = append(p.Fields, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Log) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Log"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Log) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Log) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("fields", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Fields { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); 
err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) + } + return err +} + +func (p *Log) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Log(%+v)", *p) +} + +// Attributes: +// - RefType +// - TraceIdLow +// - TraceIdHigh +// - SpanId +type SpanRef struct { + RefType SpanRefType `thrift:"refType,1,required" json:"refType"` + TraceIdLow int64 `thrift:"traceIdLow,2,required" json:"traceIdLow"` + TraceIdHigh int64 `thrift:"traceIdHigh,3,required" json:"traceIdHigh"` + SpanId int64 `thrift:"spanId,4,required" json:"spanId"` +} + +func NewSpanRef() *SpanRef { + return &SpanRef{} +} + +func (p *SpanRef) GetRefType() SpanRefType { + return p.RefType +} + +func (p *SpanRef) GetTraceIdLow() int64 { + return p.TraceIdLow +} + +func (p *SpanRef) GetTraceIdHigh() int64 { + return p.TraceIdHigh +} + +func (p *SpanRef) GetSpanId() int64 { + return p.SpanId +} +func (p *SpanRef) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetRefType bool = false + var issetTraceIdLow bool = false + var issetTraceIdHigh bool = false + var issetSpanId bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetRefType = true + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + issetTraceIdLow = true + case 3: + if err := p.readField3(iprot); err != nil { + return err + } + issetTraceIdHigh = true + case 4: + if err := p.readField4(iprot); err != nil { + return err + } + issetSpanId = true + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetRefType { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set")) + } + if !issetTraceIdLow { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) + } + if !issetTraceIdHigh { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) + } + if !issetSpanId { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) + } + return nil +} + +func (p *SpanRef) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := SpanRefType(v) + p.RefType = temp + } + return nil +} + +func (p *SpanRef) readField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TraceIdLow = v + } + return nil +} + +func (p *SpanRef) readField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TraceIdHigh = v + } + return nil +} + +func (p *SpanRef) readField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return 
thrift.PrependError("error reading field 4: ", err) + } else { + p.SpanId = v + } + return nil +} + +func (p *SpanRef) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("SpanRef"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *SpanRef) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("refType", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) + } + if err := oprot.WriteI32(int32(p.RefType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) + } + return err +} + +func (p *SpanRef) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) + } + if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) + } + return err +} + +func (p *SpanRef) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) + } + if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) + } + return err +} + +func (p *SpanRef) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("spanId", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) + } + if err := oprot.WriteI64(int64(p.SpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) + } + return err +} + +func (p *SpanRef) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SpanRef(%+v)", *p) +} + +// Attributes: +// - TraceIdLow +// - TraceIdHigh +// - SpanId +// - ParentSpanId +// - OperationName +// - References +// - Flags +// - StartTime +// - Duration +// - Tags +// - Logs +type Span struct { + TraceIdLow int64 `thrift:"traceIdLow,1,required" json:"traceIdLow"` + TraceIdHigh int64 `thrift:"traceIdHigh,2,required" json:"traceIdHigh"` + SpanId int64 `thrift:"spanId,3,required" 
json:"spanId"` + ParentSpanId int64 `thrift:"parentSpanId,4,required" json:"parentSpanId"` + OperationName string `thrift:"operationName,5,required" json:"operationName"` + References []*SpanRef `thrift:"references,6" json:"references,omitempty"` + Flags int32 `thrift:"flags,7,required" json:"flags"` + StartTime int64 `thrift:"startTime,8,required" json:"startTime"` + Duration int64 `thrift:"duration,9,required" json:"duration"` + Tags []*Tag `thrift:"tags,10" json:"tags,omitempty"` + Logs []*Log `thrift:"logs,11" json:"logs,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceIdLow() int64 { + return p.TraceIdLow +} + +func (p *Span) GetTraceIdHigh() int64 { + return p.TraceIdHigh +} + +func (p *Span) GetSpanId() int64 { + return p.SpanId +} + +func (p *Span) GetParentSpanId() int64 { + return p.ParentSpanId +} + +func (p *Span) GetOperationName() string { + return p.OperationName +} + +var Span_References_DEFAULT []*SpanRef + +func (p *Span) GetReferences() []*SpanRef { + return p.References +} + +func (p *Span) GetFlags() int32 { + return p.Flags +} + +func (p *Span) GetStartTime() int64 { + return p.StartTime +} + +func (p *Span) GetDuration() int64 { + return p.Duration +} + +var Span_Tags_DEFAULT []*Tag + +func (p *Span) GetTags() []*Tag { + return p.Tags +} + +var Span_Logs_DEFAULT []*Log + +func (p *Span) GetLogs() []*Log { + return p.Logs +} +func (p *Span) IsSetReferences() bool { + return p.References != nil +} + +func (p *Span) IsSetTags() bool { + return p.Tags != nil +} + +func (p *Span) IsSetLogs() bool { + return p.Logs != nil +} + +func (p *Span) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetTraceIdLow bool = false + var issetTraceIdHigh bool = false + var issetSpanId bool = false + var issetParentSpanId bool = false + var issetOperationName bool = false + var issetFlags bool = false + var issetStartTime bool = false + var issetDuration bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetTraceIdLow = true + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + issetTraceIdHigh = true + case 3: + if err := p.readField3(iprot); err != nil { + return err + } + issetSpanId = true + case 4: + if err := p.readField4(iprot); err != nil { + return err + } + issetParentSpanId = true + case 5: + if err := p.readField5(iprot); err != nil { + return err + } + issetOperationName = true + case 6: + if err := p.readField6(iprot); err != nil { + return err + } + case 7: + if err := p.readField7(iprot); err != nil { + return err + } + issetFlags = true + case 8: + if err := p.readField8(iprot); err != nil { + return err + } + issetStartTime = true + case 9: + if err := p.readField9(iprot); err != nil { + return err + } + issetDuration = true + case 10: + if err := p.readField10(iprot); err != nil { + return err + } + case 11: + if err := p.readField11(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read 
struct end error: ", p), err) + } + if !issetTraceIdLow { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) + } + if !issetTraceIdHigh { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) + } + if !issetSpanId { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) + } + if !issetParentSpanId { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set")) + } + if !issetOperationName { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set")) + } + if !issetFlags { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set")) + } + if !issetStartTime { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")) + } + if !issetDuration { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set")) + } + return nil +} + +func (p *Span) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceIdLow = v + } + return nil +} + +func (p *Span) readField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TraceIdHigh = v + } + return nil +} + +func (p *Span) readField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.SpanId = v + } + return nil +} + +func (p *Span) readField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ParentSpanId = v + } + return nil +} + +func (p *Span) readField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.OperationName = v + } + return nil +} + +func (p *Span) readField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*SpanRef, 0, size) + p.References = tSlice + for i := 0; i < size; i++ { + _elem1 := &SpanRef{} + if err := _elem1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.References = append(p.References, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) readField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.Flags = v + } + return nil +} + +func (p *Span) readField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.StartTime = v + } + return nil +} + +func (p *Span) readField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Duration = v + } + return 
nil +} + +func (p *Span) readField10(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Tags = tSlice + for i := 0; i < size; i++ { + _elem2 := &Tag{} + if err := _elem2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.Tags = append(p.Tags, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) readField11(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Log, 0, size) + p.Logs = tSlice + for i := 0; i < size; i++ { + _elem3 := &Log{} + if err := _elem3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + p.Logs = append(p.Logs, _elem3) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) + } + if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) + } + return err +} + +func (p *Span) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) + } + if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) + } + return err +} + +func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { + if err := 
oprot.WriteFieldBegin("spanId", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) + } + if err := oprot.WriteI64(int64(p.SpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) + } + return err +} + +func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parentSpanId", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) + } + if err := oprot.WriteI64(int64(p.ParentSpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) + } + return err +} + +func (p *Span) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("operationName", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) + } + if err := oprot.WriteString(string(p.OperationName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) + } + return err +} + +func (p *Span) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetReferences() { + if err := oprot.WriteFieldBegin("references", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.References)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.References { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) + } + } + return err +} + +func (p *Span) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("flags", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) + } + if err := oprot.WriteI32(int32(p.Flags)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) + } + return err +} + +func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("startTime", thrift.I64, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) + } + if err := oprot.WriteI64(int64(p.StartTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) 
+ } + return err +} + +func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("duration", thrift.I64, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) + } + if err := oprot.WriteI64(int64(p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) + } + return err +} + +func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetTags() { + if err := oprot.WriteFieldBegin("tags", thrift.LIST, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Tags { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetLogs() { + if err := oprot.WriteFieldBegin("logs", thrift.LIST, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Logs)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Logs { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) + } + } + return err +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} + +// Attributes: +// - ServiceName +// - Tags +type Process struct { + ServiceName string `thrift:"serviceName,1,required" json:"serviceName"` + Tags []*Tag `thrift:"tags,2" json:"tags,omitempty"` +} + +func NewProcess() *Process { + return &Process{} +} + +func (p *Process) GetServiceName() string { + return p.ServiceName +} + +var Process_Tags_DEFAULT []*Tag + +func (p *Process) GetTags() []*Tag { + return p.Tags +} +func (p *Process) IsSetTags() bool { + return p.Tags != nil +} + +func (p *Process) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetServiceName bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetServiceName = true + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err 
:= iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetServiceName { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set")) + } + return nil +} + +func (p *Process) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Process) readField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Tags = tSlice + for i := 0; i < size; i++ { + _elem4 := &Tag{} + if err := _elem4.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Tags = append(p.Tags, _elem4) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Process) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Process"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Process) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) + } + if err := oprot.WriteString(string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) + } + return err +} + +func (p *Process) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTags() { + if err := oprot.WriteFieldBegin("tags", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Tags { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) + } + } + return err +} + +func (p *Process) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Process(%+v)", *p) +} + +// Attributes: +// - Process +// - Spans +type Batch struct { + Process *Process `thrift:"process,1,required" json:"process"` + Spans []*Span `thrift:"spans,2,required" json:"spans"` +} + +func NewBatch() *Batch { + return &Batch{} +} + +var Batch_Process_DEFAULT *Process + 
+func (p *Batch) GetProcess() *Process { + if !p.IsSetProcess() { + return Batch_Process_DEFAULT + } + return p.Process +} + +func (p *Batch) GetSpans() []*Span { + return p.Spans +} +func (p *Batch) IsSetProcess() bool { + return p.Process != nil +} + +func (p *Batch) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetProcess bool = false + var issetSpans bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetProcess = true + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + issetSpans = true + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetProcess { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set")) + } + if !issetSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set")) + } + return nil +} + +func (p *Batch) readField1(iprot thrift.TProtocol) error { + p.Process = &Process{} + if err := p.Process.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) + } + return nil +} + +func (p *Batch) readField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem5 := &Span{} + if err := _elem5.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + } + p.Spans = append(p.Spans, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Batch) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Batch"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Batch) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("process", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) + } + if err := p.Process.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) + } + return err +} + +func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) { + if err := 
oprot.WriteFieldBegin("spans", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) + } + return err +} + +func (p *Batch) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Batch(%+v)", *p) +} + +// Attributes: +// - Ok +type BatchSubmitResponse struct { + Ok bool `thrift:"ok,1,required" json:"ok"` +} + +func NewBatchSubmitResponse() *BatchSubmitResponse { + return &BatchSubmitResponse{} +} + +func (p *BatchSubmitResponse) GetOk() bool { + return p.Ok +} +func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOk bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetOk = true + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOk { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) + } + return nil +} + +func (p *BatchSubmitResponse) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ok = v + } + return nil +} + +func (p *BatchSubmitResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("BatchSubmitResponse"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BatchSubmitResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) + } + if err := oprot.WriteBool(bool(p.Ok)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) + } + return err +} + +func (p *BatchSubmitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BatchSubmitResponse(%+v)", *p) +} diff --git 
a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go new file mode 100644 index 000000000000..0f6e3a884d95 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go @@ -0,0 +1,18 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package sampling + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +func init() { +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go new file mode 100644 index 000000000000..33179cfeb3b9 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go @@ -0,0 +1,410 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package sampling + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +type SamplingManager interface { + // Parameters: + // - ServiceName + GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) +} + +type SamplingManagerClient struct { + Transport thrift.TTransport + ProtocolFactory thrift.TProtocolFactory + InputProtocol thrift.TProtocol + OutputProtocol thrift.TProtocol + SeqId int32 +} + +func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient { + return &SamplingManagerClient{Transport: t, + ProtocolFactory: f, + InputProtocol: f.GetProtocol(t), + OutputProtocol: f.GetProtocol(t), + SeqId: 0, + } +} + +func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient { + return &SamplingManagerClient{Transport: t, + ProtocolFactory: nil, + InputProtocol: iprot, + OutputProtocol: oprot, + SeqId: 0, + } +} + +// Parameters: +// - ServiceName +func (p *SamplingManagerClient) GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) { + if err = p.sendGetSamplingStrategy(serviceName); err != nil { + return + } + return p.recvGetSamplingStrategy() +} + +func (p *SamplingManagerClient) sendGetSamplingStrategy(serviceName string) (err error) { + oprot := p.OutputProtocol + if oprot == nil { + oprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.OutputProtocol = oprot + } + p.SeqId++ + if err = oprot.WriteMessageBegin("getSamplingStrategy", thrift.CALL, p.SeqId); err != nil { + return + } + args := SamplingManagerGetSamplingStrategyArgs{ + ServiceName: serviceName, + } + if err = args.Write(oprot); err != nil { + return + } + if err = oprot.WriteMessageEnd(); err != nil { + return + } + return oprot.Flush() +} + +func (p *SamplingManagerClient) recvGetSamplingStrategy() (value *SamplingStrategyResponse, err error) { + iprot := p.InputProtocol + if iprot == nil { + iprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.InputProtocol = iprot + } + method, mTypeId, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return + } + if method != "getSamplingStrategy" { + err = 
thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getSamplingStrategy failed: wrong method name") + return + } + if p.SeqId != seqId { + err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getSamplingStrategy failed: out of sequence response") + return + } + if mTypeId == thrift.EXCEPTION { + error1 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error2 error + error2, err = error1.Read(iprot) + if err != nil { + return + } + if err = iprot.ReadMessageEnd(); err != nil { + return + } + err = error2 + return + } + if mTypeId != thrift.REPLY { + err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getSamplingStrategy failed: invalid message type") + return + } + result := SamplingManagerGetSamplingStrategyResult{} + if err = result.Read(iprot); err != nil { + return + } + if err = iprot.ReadMessageEnd(); err != nil { + return + } + value = result.GetSuccess() + return +} + +type SamplingManagerProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler SamplingManager +} + +func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor { + + self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler} + return self3 +} + +func (p *SamplingManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x4.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, x4 + +} + +type samplingManagerProcessorGetSamplingStrategy struct { + handler SamplingManager +} + +func (p *samplingManagerProcessorGetSamplingStrategy) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := SamplingManagerGetSamplingStrategyArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, err + } + + iprot.ReadMessageEnd() + result := SamplingManagerGetSamplingStrategyResult{} + var retval *SamplingStrategyResponse + var err2 error + if retval, err2 = p.handler.GetSamplingStrategy(args.ServiceName); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error()) + oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + 
return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - ServiceName +type SamplingManagerGetSamplingStrategyArgs struct { + ServiceName string `thrift:"serviceName,1" json:"serviceName"` +} + +func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs { + return &SamplingManagerGetSamplingStrategyArgs{} +} + +func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string { + return p.ServiceName +} +func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyArgs) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) + } + if err := oprot.WriteString(string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) + } + return err +} + +func (p *SamplingManagerGetSamplingStrategyArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p) +} + +// Attributes: +// - Success +type SamplingManagerGetSamplingStrategyResult struct { + Success *SamplingStrategyResponse `thrift:"success,0" json:"success,omitempty"` +} + +func 
NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult { + return &SamplingManagerGetSamplingStrategyResult{} +} + +var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse + +func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse { + if !p.IsSetSuccess() { + return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT + } + return p.Success +} +func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if err := p.readField0(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyResult) readField0(iprot thrift.TProtocol) error { + p.Success = &SamplingStrategyResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField0(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *SamplingManagerGetSamplingStrategyResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go new file mode 100644 index 000000000000..9abaf0542d40 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go @@ -0,0 +1,873 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package sampling + +import ( + "bytes" + "fmt" + 
"github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +var GoUnusedProtection__ int + +type SamplingStrategyType int64 + +const ( + SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0 + SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1 +) + +func (p SamplingStrategyType) String() string { + switch p { + case SamplingStrategyType_PROBABILISTIC: + return "PROBABILISTIC" + case SamplingStrategyType_RATE_LIMITING: + return "RATE_LIMITING" + } + return "" +} + +func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) { + switch s { + case "PROBABILISTIC": + return SamplingStrategyType_PROBABILISTIC, nil + case "RATE_LIMITING": + return SamplingStrategyType_RATE_LIMITING, nil + } + return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string") +} + +func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v } + +func (p SamplingStrategyType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *SamplingStrategyType) UnmarshalText(text []byte) error { + q, err := SamplingStrategyTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +// Attributes: +// - SamplingRate +type ProbabilisticSamplingStrategy struct { + SamplingRate float64 `thrift:"samplingRate,1,required" json:"samplingRate"` +} + +func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy { + return &ProbabilisticSamplingStrategy{} +} + +func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 { + return p.SamplingRate +} +func (p *ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSamplingRate bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetSamplingRate = true + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSamplingRate { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set")) + } + return nil +} + +func (p *ProbabilisticSamplingStrategy) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.SamplingRate = v + } + return nil +} + +func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + 
+func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err) + } + if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err) + } + return err +} + +func (p *ProbabilisticSamplingStrategy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p) +} + +// Attributes: +// - MaxTracesPerSecond +type RateLimitingSamplingStrategy struct { + MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" json:"maxTracesPerSecond"` +} + +func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy { + return &RateLimitingSamplingStrategy{} +} + +func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 { + return p.MaxTracesPerSecond +} +func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetMaxTracesPerSecond bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetMaxTracesPerSecond = true + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetMaxTracesPerSecond { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set")) + } + return nil +} + +func (p *RateLimitingSamplingStrategy) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.MaxTracesPerSecond = v + } + return nil +} + +func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err) + } + if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err) + } + return err +} + +func (p *RateLimitingSamplingStrategy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p) +} + +// Attributes: +// - Operation +// - ProbabilisticSampling +type OperationSamplingStrategy struct { + Operation string `thrift:"operation,1,required" json:"operation"` + ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" json:"probabilisticSampling"` +} + +func NewOperationSamplingStrategy() *OperationSamplingStrategy { + return &OperationSamplingStrategy{} +} + +func (p *OperationSamplingStrategy) GetOperation() string { + return p.Operation +} + +var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy + +func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { + if !p.IsSetProbabilisticSampling() { + return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT + } + return p.ProbabilisticSampling +} +func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool { + return p.ProbabilisticSampling != nil +} + +func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOperation bool = false + var issetProbabilisticSampling bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetOperation = true + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + issetProbabilisticSampling = true + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOperation { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set")) + } + if !issetProbabilisticSampling { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set")) + } + return nil +} + +func (p *OperationSamplingStrategy) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Operation = v + } + return nil +} + +func (p *OperationSamplingStrategy) readField2(iprot thrift.TProtocol) error { + p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} + if err := p.ProbabilisticSampling.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) + } + return nil +} + +func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + 
return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) + } + if err := oprot.WriteString(string(p.Operation)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) + } + return err +} + +func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) + } + if err := p.ProbabilisticSampling.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) + } + return err +} + +func (p *OperationSamplingStrategy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p) +} + +// Attributes: +// - DefaultSamplingProbability +// - DefaultLowerBoundTracesPerSecond +// - PerOperationStrategies +// - DefaultUpperBoundTracesPerSecond +type PerOperationSamplingStrategies struct { + DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" json:"defaultSamplingProbability"` + DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" json:"defaultLowerBoundTracesPerSecond"` + PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" json:"perOperationStrategies"` + DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" json:"defaultUpperBoundTracesPerSecond,omitempty"` +} + +func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies { + return &PerOperationSamplingStrategies{} +} + +func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 { + return p.DefaultSamplingProbability +} + +func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 { + return p.DefaultLowerBoundTracesPerSecond +} + +func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy { + return p.PerOperationStrategies +} + +var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64 + +func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 { + if !p.IsSetDefaultUpperBoundTracesPerSecond() { + return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT + } + return *p.DefaultUpperBoundTracesPerSecond +} +func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool { + return p.DefaultUpperBoundTracesPerSecond != nil +} + +func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var 
issetDefaultSamplingProbability bool = false + var issetDefaultLowerBoundTracesPerSecond bool = false + var issetPerOperationStrategies bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetDefaultSamplingProbability = true + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + issetDefaultLowerBoundTracesPerSecond = true + case 3: + if err := p.readField3(iprot); err != nil { + return err + } + issetPerOperationStrategies = true + case 4: + if err := p.readField4(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetDefaultSamplingProbability { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set")) + } + if !issetDefaultLowerBoundTracesPerSecond { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set")) + } + if !issetPerOperationStrategies { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set")) + } + return nil +} + +func (p *PerOperationSamplingStrategies) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.DefaultSamplingProbability = v + } + return nil +} + +func (p *PerOperationSamplingStrategies) readField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.DefaultLowerBoundTracesPerSecond = v + } + return nil +} + +func (p *PerOperationSamplingStrategies) readField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*OperationSamplingStrategy, 0, size) + p.PerOperationStrategies = tSlice + for i := 0; i < size; i++ { + _elem0 := &OperationSamplingStrategy{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *PerOperationSamplingStrategies) readField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.DefaultUpperBoundTracesPerSecond = &v + } + return nil +} + +func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := 
p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err) + } + if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err) + } + return err +} + +func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err) + } + if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err) + } + return err +} + +func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.PerOperationStrategies { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err) + } + return err +} + +func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDefaultUpperBoundTracesPerSecond() { + if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err) + } + if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err) + } + } + return err +} + +func (p *PerOperationSamplingStrategies) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p) +} + +// Attributes: +// - StrategyType +// - ProbabilisticSampling +// - RateLimitingSampling +// - OperationSampling +type SamplingStrategyResponse struct { + StrategyType SamplingStrategyType `thrift:"strategyType,1,required" json:"strategyType"` + ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" json:"probabilisticSampling,omitempty"` + RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" json:"rateLimitingSampling,omitempty"` + OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" json:"operationSampling,omitempty"` +} + +func NewSamplingStrategyResponse() *SamplingStrategyResponse { + return &SamplingStrategyResponse{} +} + +func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType { + return p.StrategyType +} + +var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy + +func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { + if !p.IsSetProbabilisticSampling() { + return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT + } + return p.ProbabilisticSampling +} + +var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy + +func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy { + if !p.IsSetRateLimitingSampling() { + return SamplingStrategyResponse_RateLimitingSampling_DEFAULT + } + return p.RateLimitingSampling +} + +var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies + +func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies { + if !p.IsSetOperationSampling() { + return SamplingStrategyResponse_OperationSampling_DEFAULT + } + return p.OperationSampling +} +func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool { + return p.ProbabilisticSampling != nil +} + +func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool { + return p.RateLimitingSampling != nil +} + +func (p *SamplingStrategyResponse) IsSetOperationSampling() bool { + return p.OperationSampling != nil +} + +func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStrategyType bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetStrategyType = true + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + case 3: + if err := p.readField3(iprot); err != nil { + return err + } + case 4: + if err := p.readField4(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStrategyType { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set")) + } + return nil +} + +func (p *SamplingStrategyResponse) readField1(iprot thrift.TProtocol) error { + if v, err := 
iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := SamplingStrategyType(v) + p.StrategyType = temp + } + return nil +} + +func (p *SamplingStrategyResponse) readField2(iprot thrift.TProtocol) error { + p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} + if err := p.ProbabilisticSampling.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) + } + return nil +} + +func (p *SamplingStrategyResponse) readField3(iprot thrift.TProtocol) error { + p.RateLimitingSampling = &RateLimitingSamplingStrategy{} + if err := p.RateLimitingSampling.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err) + } + return nil +} + +func (p *SamplingStrategyResponse) readField4(iprot thrift.TProtocol) error { + p.OperationSampling = &PerOperationSamplingStrategies{} + if err := p.OperationSampling.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err) + } + return nil +} + +func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err) + } + if err := oprot.WriteI32(int32(p.StrategyType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err) + } + return err +} + +func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetProbabilisticSampling() { + if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) + } + if err := p.ProbabilisticSampling.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) + } + } + return err +} + +func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetRateLimitingSampling() { + if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err) + } + if err := p.RateLimitingSampling.Write(oprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err) + } + } + return err +} + +func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetOperationSampling() { + if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err) + } + if err := p.OperationSampling.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err) + } + } + return err +} + +func (p *SamplingStrategyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go new file mode 100644 index 000000000000..f05144bfc694 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go @@ -0,0 +1,32 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package zipkincore + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +const CLIENT_SEND = "cs" +const CLIENT_RECV = "cr" +const SERVER_SEND = "ss" +const SERVER_RECV = "sr" +const WIRE_SEND = "ws" +const WIRE_RECV = "wr" +const CLIENT_SEND_FRAGMENT = "csf" +const CLIENT_RECV_FRAGMENT = "crf" +const SERVER_SEND_FRAGMENT = "ssf" +const SERVER_RECV_FRAGMENT = "srf" +const LOCAL_COMPONENT = "lc" +const CLIENT_ADDR = "ca" +const SERVER_ADDR = "sa" + +func init() { +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go new file mode 100644 index 000000000000..34b2b267eb52 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go @@ -0,0 +1,1247 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package zipkincore + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +var GoUnusedProtection__ int + +type AnnotationType int64 + +const ( + AnnotationType_BOOL AnnotationType = 0 + AnnotationType_BYTES AnnotationType = 1 + AnnotationType_I16 AnnotationType = 2 + AnnotationType_I32 AnnotationType = 3 + AnnotationType_I64 AnnotationType = 4 + AnnotationType_DOUBLE AnnotationType = 5 + AnnotationType_STRING AnnotationType = 6 +) + +func (p AnnotationType) String() string { + switch p { + case AnnotationType_BOOL: + return "BOOL" + case AnnotationType_BYTES: + return "BYTES" + case AnnotationType_I16: + return "I16" + case AnnotationType_I32: + return "I32" + case AnnotationType_I64: + return "I64" + case AnnotationType_DOUBLE: + return "DOUBLE" + case AnnotationType_STRING: + return "STRING" + } + return "" +} + +func AnnotationTypeFromString(s string) (AnnotationType, error) { + switch s { + case "BOOL": + return AnnotationType_BOOL, nil + case "BYTES": + return AnnotationType_BYTES, nil + case "I16": + return AnnotationType_I16, nil + case "I32": + return AnnotationType_I32, nil + case "I64": + return AnnotationType_I64, nil + case "DOUBLE": + return AnnotationType_DOUBLE, nil + case "STRING": + return AnnotationType_STRING, nil + } + return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") +} + +func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } + +func (p AnnotationType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AnnotationType) UnmarshalText(text []byte) error { + q, err := AnnotationTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +// Indicates the network context of a service recording an annotation with two +// exceptions. +// +// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, +// the endpoint indicates the source or destination of an RPC. This exception +// allows zipkin to display network context of uninstrumented services, or +// clients such as web browsers. +// +// Attributes: +// - Ipv4: IPv4 host address packed into 4 bytes. +// +// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 +// - Port: IPv4 port +// +// Note: this is to be treated as an unsigned integer, so watch for negatives. +// +// Conventionally, when the port isn't known, port = 0. +// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" +// +// Conventionally, when the service name isn't known, service_name = "unknown". 
+type Endpoint struct { + Ipv4 int32 `thrift:"ipv4,1" json:"ipv4"` + Port int16 `thrift:"port,2" json:"port"` + ServiceName string `thrift:"service_name,3" json:"service_name"` +} + +func NewEndpoint() *Endpoint { + return &Endpoint{} +} + +func (p *Endpoint) GetIpv4() int32 { + return p.Ipv4 +} + +func (p *Endpoint) GetPort() int16 { + return p.Port +} + +func (p *Endpoint) GetServiceName() string { + return p.ServiceName +} +func (p *Endpoint) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + case 3: + if err := p.readField3(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Endpoint) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ipv4 = v + } + return nil +} + +func (p *Endpoint) readField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Port = v + } + return nil +} + +func (p *Endpoint) readField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Endpoint) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Endpoint"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) + } + if err := oprot.WriteI32(int32(p.Ipv4)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) + } + return err +} + +func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) + } + if err := oprot.WriteI16(int16(p.Port)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.port (2) 
field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) + } + return err +} + +func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) + } + if err := oprot.WriteString(string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) + } + return err +} + +func (p *Endpoint) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Endpoint(%+v)", *p) +} + +// An annotation is similar to a log statement. It includes a host field which +// allows these events to be attributed properly, and also aggregatable. +// +// Attributes: +// - Timestamp: Microseconds from epoch. +// +// This value should use the most precise value possible. For example, +// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. +// - Value +// - Host: Always the host that recorded the event. By specifying the host you allow +// rollup of all events (such as client requests to a service) by IP address. +type Annotation struct { + Timestamp int64 `thrift:"timestamp,1" json:"timestamp"` + Value string `thrift:"value,2" json:"value"` + Host *Endpoint `thrift:"host,3" json:"host,omitempty"` +} + +func NewAnnotation() *Annotation { + return &Annotation{} +} + +func (p *Annotation) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Annotation) GetValue() string { + return p.Value +} + +var Annotation_Host_DEFAULT *Endpoint + +func (p *Annotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return Annotation_Host_DEFAULT + } + return p.Host +} +func (p *Annotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *Annotation) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + case 3: + if err := p.readField3(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Annotation) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Annotation) readField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *Annotation) readField3(iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := 
p.Host.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *Annotation) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Annotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteString(string(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) + } + if err := p.Host.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) + } + } + return err +} + +func (p *Annotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Annotation(%+v)", *p) +} + +// Binary annotations are tags applied to a Span to give it context. For +// example, a binary annotation of "http.uri" could the path to a resource in a +// RPC call. +// +// Binary annotations of type STRING are always queryable, though more a +// historical implementation detail than a structural concern. +// +// Binary annotations can repeat, and vary on the host. Similar to Annotation, +// the host indicates who logged the event. This allows you to tell the +// difference between the client and server side of the same key. For example, +// the key "http.uri" might be different on the client and server side due to +// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, +// you can see the different points of view, which often help in debugging. +// +// Attributes: +// - Key +// - Value +// - AnnotationType +// - Host: The host that recorded tag, which allows you to differentiate between +// multiple tags with the same key. 
There are two exceptions to this. +// +// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or +// destination of an RPC. This exception allows zipkin to display network +// context of uninstrumented services, or clients such as web browsers. +type BinaryAnnotation struct { + Key string `thrift:"key,1" json:"key"` + Value []byte `thrift:"value,2" json:"value"` + AnnotationType AnnotationType `thrift:"annotation_type,3" json:"annotation_type"` + Host *Endpoint `thrift:"host,4" json:"host,omitempty"` +} + +func NewBinaryAnnotation() *BinaryAnnotation { + return &BinaryAnnotation{} +} + +func (p *BinaryAnnotation) GetKey() string { + return p.Key +} + +func (p *BinaryAnnotation) GetValue() []byte { + return p.Value +} + +func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { + return p.AnnotationType +} + +var BinaryAnnotation_Host_DEFAULT *Endpoint + +func (p *BinaryAnnotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return BinaryAnnotation_Host_DEFAULT + } + return p.Host +} +func (p *BinaryAnnotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + case 3: + if err := p.readField3(iprot); err != nil { + return err + } + case 4: + if err := p.readField4(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BinaryAnnotation) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *BinaryAnnotation) readField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *BinaryAnnotation) readField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := AnnotationType(v) + p.AnnotationType = temp + } + return nil +} + +func (p *BinaryAnnotation) readField4(iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return 
err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteBinary(p.Value); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) + } + if err := p.Host.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) + } + } + return err +} + +func (p *BinaryAnnotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BinaryAnnotation(%+v)", *p) +} + +// A trace is a series of spans (often RPC calls) which form a latency tree. +// +// The root span is where trace_id = id and parent_id = Nil. The root span is +// usually the longest interval in the trace, starting with a SERVER_RECV +// annotation and ending with a SERVER_SEND. +// +// Attributes: +// - TraceID +// - Name: Span name in lowercase, rpc method for example +// +// Conventionally, when the span name isn't known, name = "unknown". +// - ID +// - ParentID +// - Annotations +// - BinaryAnnotations +// - Debug +// - Timestamp: Microseconds from epoch of the creation of this span. +// +// This value should be set directly by instrumentation, using the most +// precise value possible. For example, gettimeofday or syncing nanoTime +// against a tick of currentTimeMillis. 
+// +// For compatibilty with instrumentation that precede this field, collectors +// or span stores can derive this via Annotation.timestamp. +// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. +// +// This field is optional for compatibility with old data: first-party span +// stores are expected to support this at time of introduction. +// - Duration: Measurement of duration in microseconds, used to support queries. +// +// This value should be set directly, where possible. Doing so encourages +// precise measurement decoupled from problems of clocks, such as skew or NTP +// updates causing time to move backwards. +// +// For compatibilty with instrumentation that precede this field, collectors +// or span stores can derive this by subtracting Annotation.timestamp. +// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. +// +// If this field is persisted as unset, zipkin will continue to work, except +// duration query support will be implementation-specific. Similarly, setting +// this field non-atomically is implementation-specific. +// +// This field is i64 vs i32 to support spans longer than 35 minutes. +type Span struct { + TraceID int64 `thrift:"trace_id,1" json:"trace_id"` + // unused field # 2 + Name string `thrift:"name,3" json:"name"` + ID int64 `thrift:"id,4" json:"id"` + ParentID *int64 `thrift:"parent_id,5" json:"parent_id,omitempty"` + Annotations []*Annotation `thrift:"annotations,6" json:"annotations"` + // unused field # 7 + BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" json:"binary_annotations"` + Debug bool `thrift:"debug,9" json:"debug,omitempty"` + Timestamp *int64 `thrift:"timestamp,10" json:"timestamp,omitempty"` + Duration *int64 `thrift:"duration,11" json:"duration,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceID() int64 { + return p.TraceID +} + +func (p *Span) GetName() string { + return p.Name +} + +func (p *Span) GetID() int64 { + return p.ID +} + +var Span_ParentID_DEFAULT int64 + +func (p *Span) GetParentID() int64 { + if !p.IsSetParentID() { + return Span_ParentID_DEFAULT + } + return *p.ParentID +} + +func (p *Span) GetAnnotations() []*Annotation { + return p.Annotations +} + +func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { + return p.BinaryAnnotations +} + +var Span_Debug_DEFAULT bool = false + +func (p *Span) GetDebug() bool { + return p.Debug +} + +var Span_Timestamp_DEFAULT int64 + +func (p *Span) GetTimestamp() int64 { + if !p.IsSetTimestamp() { + return Span_Timestamp_DEFAULT + } + return *p.Timestamp +} + +var Span_Duration_DEFAULT int64 + +func (p *Span) GetDuration() int64 { + if !p.IsSetDuration() { + return Span_Duration_DEFAULT + } + return *p.Duration +} +func (p *Span) IsSetParentID() bool { + return p.ParentID != nil +} + +func (p *Span) IsSetDebug() bool { + return p.Debug != Span_Debug_DEFAULT +} + +func (p *Span) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *Span) IsSetDuration() bool { + return p.Duration != nil +} + +func (p *Span) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + case 3: + if err 
:= p.readField3(iprot); err != nil { + return err + } + case 4: + if err := p.readField4(iprot); err != nil { + return err + } + case 5: + if err := p.readField5(iprot); err != nil { + return err + } + case 6: + if err := p.readField6(iprot); err != nil { + return err + } + case 8: + if err := p.readField8(iprot); err != nil { + return err + } + case 9: + if err := p.readField9(iprot); err != nil { + return err + } + case 10: + if err := p.readField10(iprot); err != nil { + return err + } + case 11: + if err := p.readField11(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Span) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceID = v + } + return nil +} + +func (p *Span) readField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *Span) readField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ID = v + } + return nil +} + +func (p *Span) readField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ParentID = &v + } + return nil +} + +func (p *Span) readField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Annotation, 0, size) + p.Annotations = tSlice + for i := 0; i < size; i++ { + _elem0 := &Annotation{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Annotations = append(p.Annotations, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) readField8(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BinaryAnnotation, 0, size) + p.BinaryAnnotations = tSlice + for i := 0; i < size; i++ { + _elem1 := &BinaryAnnotation{} + if err := _elem1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) readField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Debug = v + } + return nil +} + +func (p *Span) readField10(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.Timestamp = &v + } + return nil +} + +func (p *Span) readField11(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return 
thrift.PrependError("error reading field 11: ", err) + } else { + p.Duration = &v + } + return nil +} + +func (p *Span) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.TraceID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) + } + return err +} + +func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) + } + if err := oprot.WriteString(string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) + } + return err +} + +func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) + } + if err := oprot.WriteI64(int64(p.ID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) + } + return err +} + +func (p *Span) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetParentID() { + if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) + } + if err := oprot.WriteI64(int64(*p.ParentID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) + } + } + return err +} + +func (p *Span) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 
6:annotations: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Annotations { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) + } + return err +} + +func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BinaryAnnotations { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) + } + return err +} + +func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetDebug() { + if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) + } + if err := oprot.WriteBool(bool(p.Debug)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) + } + } + return err +} + +func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) + } + if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetDuration() { + if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) + } + if err := oprot.WriteI64(int64(*p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) + } + } + return err +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} + +// Attributes: +// - Ok +type Response struct { + Ok bool `thrift:"ok,1,required" json:"ok"` +} + +func NewResponse() *Response { + return &Response{} +} + +func (p *Response) GetOk() bool { + return p.Ok +} 
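// Illustrative sketch only -- not part of the generated Thrift code or of this vendored
// file. It shows how a caller might populate the zipkincore Span described above, using
// the conventions from its doc comments: microsecond epoch timestamps, optional pointer
// Timestamp/Duration fields, and the CLIENT_SEND / CLIENT_RECV annotation values from
// constants.go. The helper name, endpoint, and field values are hypothetical.

func newExampleSpan(start, finish int64) *Span {
	// Hypothetical endpoint; Ipv4 is packed per the Endpoint comment, e.g. 127.0.0.1.
	host := &Endpoint{Ipv4: (127 << 24) | 1, Port: 8080, ServiceName: "example-service"}
	duration := finish - start // microseconds; i64 so spans longer than 35 minutes fit
	return &Span{
		TraceID:   1,
		ID:        2,
		Name:      "example-operation",
		Timestamp: &start,    // optional: microseconds since epoch
		Duration:  &duration, // optional: set directly where possible
		Annotations: []*Annotation{
			{Timestamp: start, Value: CLIENT_SEND, Host: host},
			{Timestamp: finish, Value: CLIENT_RECV, Host: host},
		},
	}
}
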
+func (p *Response) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOk bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + issetOk = true + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOk { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) + } + return nil +} + +func (p *Response) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ok = v + } + return nil +} + +func (p *Response) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Response) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) + } + if err := oprot.WriteBool(bool(p.Ok)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) + } + return err +} + +func (p *Response) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Response(%+v)", *p) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go new file mode 100644 index 000000000000..417e883d0e31 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go @@ -0,0 +1,446 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package zipkincore + +import ( + "bytes" + "fmt" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +type ZipkinCollector interface { + // Parameters: + // - Spans + SubmitZipkinBatch(spans []*Span) (r []*Response, err error) +} + +type ZipkinCollectorClient struct { + Transport thrift.TTransport + ProtocolFactory thrift.TProtocolFactory + InputProtocol thrift.TProtocol + OutputProtocol thrift.TProtocol + SeqId int32 +} + +func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { + return &ZipkinCollectorClient{Transport: t, + ProtocolFactory: f, + InputProtocol: f.GetProtocol(t), + OutputProtocol: f.GetProtocol(t), + SeqId: 0, + } +} + +func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { + return &ZipkinCollectorClient{Transport: t, + ProtocolFactory: nil, + InputProtocol: iprot, + OutputProtocol: oprot, + SeqId: 0, + } +} + +// Parameters: +// - Spans +func (p *ZipkinCollectorClient) SubmitZipkinBatch(spans []*Span) (r []*Response, err error) { + if err = p.sendSubmitZipkinBatch(spans); err != nil { + return + } + return p.recvSubmitZipkinBatch() +} + +func (p *ZipkinCollectorClient) sendSubmitZipkinBatch(spans []*Span) (err error) { + oprot := p.OutputProtocol + if oprot == nil { + oprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.OutputProtocol = oprot + } + p.SeqId++ + if err = oprot.WriteMessageBegin("submitZipkinBatch", thrift.CALL, p.SeqId); err != nil { + return + } + args := ZipkinCollectorSubmitZipkinBatchArgs{ + Spans: spans, + } + if err = args.Write(oprot); err != nil { + return + } + if err = oprot.WriteMessageEnd(); err != nil { + return + } + return oprot.Flush() +} + +func (p *ZipkinCollectorClient) recvSubmitZipkinBatch() (value []*Response, err error) { + iprot := p.InputProtocol + if iprot == nil { + iprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.InputProtocol = iprot + } + method, mTypeId, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return + } + if method != "submitZipkinBatch" { + err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "submitZipkinBatch failed: wrong method name") + return + } + if p.SeqId != seqId { + err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "submitZipkinBatch failed: out of sequence response") + return + } + if mTypeId == thrift.EXCEPTION { + error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error3 error + error3, err = error2.Read(iprot) + if err != nil { + return + } + if err = iprot.ReadMessageEnd(); err != nil { + return + } + err = error3 + return + } + if mTypeId != thrift.REPLY { + err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "submitZipkinBatch failed: invalid message type") + return + } + result := ZipkinCollectorSubmitZipkinBatchResult{} + if err = result.Read(iprot); err != nil { + return + } + if err = iprot.ReadMessageEnd(); err != nil { + return + } + value = result.GetSuccess() + return +} + +type ZipkinCollectorProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler ZipkinCollector +} + +func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *ZipkinCollectorProcessor) ProcessorMap() 
map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { + + self4 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self4.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler} + return self4 +} + +func (p *ZipkinCollectorProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x5.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, x5 + +} + +type zipkinCollectorProcessorSubmitZipkinBatch struct { + handler ZipkinCollector +} + +func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ZipkinCollectorSubmitZipkinBatchArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, err + } + + iprot.ReadMessageEnd() + result := ZipkinCollectorSubmitZipkinBatchResult{} + var retval []*Response + var err2 error + if retval, err2 = p.handler.SubmitZipkinBatch(args.Spans); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error()) + oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("submitZipkinBatch", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Spans +type ZipkinCollectorSubmitZipkinBatchArgs struct { + Spans []*Span `thrift:"spans,1" json:"spans"` +} + +func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { + return &ZipkinCollectorSubmitZipkinBatchArgs{} +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { + return p.Spans +} +func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err 
+ } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem6 := &Span{} + if err := _elem6.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) + } + p.Spans = append(p.Spans, _elem6) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("submitZipkinBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) + } + return err +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ZipkinCollectorSubmitZipkinBatchResult struct { + Success []*Response `thrift:"success,0" json:"success,omitempty"` +} + +func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { + return &ZipkinCollectorSubmitZipkinBatchResult{} +} + +var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response + +func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { + return p.Success +} +func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if err := p.readField0(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err 
:= iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) readField0(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Response, 0, size) + p.Success = tSlice + for i := 0; i < size; i++ { + _elem7 := &Response{} + if err := _elem7.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err) + } + p.Success = append(p.Success, _elem7) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("submitZipkinBatch_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField0(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Success { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md new file mode 100644 index 000000000000..1d8e642e028b --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/README.md @@ -0,0 +1,7 @@ +# Apache Thrift + +This is a partial copy of Apache Thrift v0.10 (https://github.com/apache/thrift/commit/b2a4d4ae21c789b689dd162deb819665567f481c). + +It is vendored code to avoid compatibility issues introduced in Thrift v0.11. + +See https://github.com/jaegertracing/jaeger-client-go/pull/303. diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go new file mode 100644 index 000000000000..6655cc5a9720 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +const ( + UNKNOWN_APPLICATION_EXCEPTION = 0 + UNKNOWN_METHOD = 1 + INVALID_MESSAGE_TYPE_EXCEPTION = 2 + WRONG_METHOD_NAME = 3 + BAD_SEQUENCE_ID = 4 + MISSING_RESULT = 5 + INTERNAL_ERROR = 6 + PROTOCOL_ERROR = 7 +) + +// Application level Thrift exception +type TApplicationException interface { + TException + TypeId() int32 + Read(iprot TProtocol) (TApplicationException, error) + Write(oprot TProtocol) error +} + +type tApplicationException struct { + message string + type_ int32 +} + +func (e tApplicationException) Error() string { + return e.message +} + +func NewTApplicationException(type_ int32, message string) TApplicationException { + return &tApplicationException{message, type_} +} + +func (p *tApplicationException) TypeId() int32 { + return p.type_ +} + +func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) { + _, err := iprot.ReadStructBegin() + if err != nil { + return nil, err + } + + message := "" + type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) + + for { + _, ttype, id, err := iprot.ReadFieldBegin() + if err != nil { + return nil, err + } + if ttype == STOP { + break + } + switch id { + case 1: + if ttype == STRING { + if message, err = iprot.ReadString(); err != nil { + return nil, err + } + } else { + if err = SkipDefaultDepth(iprot, ttype); err != nil { + return nil, err + } + } + case 2: + if ttype == I32 { + if type_, err = iprot.ReadI32(); err != nil { + return nil, err + } + } else { + if err = SkipDefaultDepth(iprot, ttype); err != nil { + return nil, err + } + } + default: + if err = SkipDefaultDepth(iprot, ttype); err != nil { + return nil, err + } + } + if err = iprot.ReadFieldEnd(); err != nil { + return nil, err + } + } + return NewTApplicationException(type_, message), iprot.ReadStructEnd() +} + +func (p *tApplicationException) Write(oprot TProtocol) (err error) { + err = oprot.WriteStructBegin("TApplicationException") + if len(p.Error()) > 0 { + err = oprot.WriteFieldBegin("message", STRING, 1) + if err != nil { + return + } + err = oprot.WriteString(p.Error()) + if err != nil { + return + } + err = oprot.WriteFieldEnd() + if err != nil { + return + } + } + err = oprot.WriteFieldBegin("type", I32, 2) + if err != nil { + return + } + err = oprot.WriteI32(p.type_) + if err != nil { + return + } + err = oprot.WriteFieldEnd() + if err != nil { + return + } + err = oprot.WriteFieldStop() + if err != nil { + return + } + err = oprot.WriteStructEnd() + return +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go new file mode 100644 index 000000000000..690d341111b5 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go @@ -0,0 +1,514 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +type TBinaryProtocol struct { + trans TRichTransport + origTransport TTransport + reader io.Reader + writer io.Writer + strictRead bool + strictWrite bool + buffer [64]byte +} + +type TBinaryProtocolFactory struct { + strictRead bool + strictWrite bool +} + +func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { + return NewTBinaryProtocol(t, false, true) +} + +func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { + p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite} + if et, ok := t.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(t) + } + p.reader = p.trans + p.writer = p.trans + return p +} + +func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { + return NewTBinaryProtocolFactory(false, true) +} + +func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { + return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite} +} + +func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { + return NewTBinaryProtocol(t, p.strictRead, p.strictWrite) +} + +/** + * Writing Methods + */ + +func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { + if p.strictWrite { + version := uint32(VERSION_1) | uint32(typeId) + e := p.WriteI32(int32(version)) + if e != nil { + return e + } + e = p.WriteString(name) + if e != nil { + return e + } + e = p.WriteI32(seqId) + return e + } else { + e := p.WriteString(name) + if e != nil { + return e + } + e = p.WriteByte(int8(typeId)) + if e != nil { + return e + } + e = p.WriteI32(seqId) + return e + } + return nil +} + +func (p *TBinaryProtocol) WriteMessageEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteStructBegin(name string) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + e := p.WriteByte(int8(typeId)) + if e != nil { + return e + } + e = p.WriteI16(id) + return e +} + +func (p *TBinaryProtocol) WriteFieldEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldStop() error { + e := p.WriteByte(STOP) + return e +} + +func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + e := p.WriteByte(int8(keyType)) + if e != nil { + return e + } + e = p.WriteByte(int8(valueType)) + if e != nil { + return e + } + e = p.WriteI32(int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteMapEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) 
error { + e := p.WriteByte(int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteListEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error { + e := p.WriteByte(int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteSetEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteBool(value bool) error { + if value { + return p.WriteByte(1) + } + return p.WriteByte(0) +} + +func (p *TBinaryProtocol) WriteByte(value int8) error { + e := p.trans.WriteByte(byte(value)) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI16(value int16) error { + v := p.buffer[0:2] + binary.BigEndian.PutUint16(v, uint16(value)) + _, e := p.writer.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI32(value int32) error { + v := p.buffer[0:4] + binary.BigEndian.PutUint32(v, uint32(value)) + _, e := p.writer.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI64(value int64) error { + v := p.buffer[0:8] + binary.BigEndian.PutUint64(v, uint64(value)) + _, err := p.writer.Write(v) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteDouble(value float64) error { + return p.WriteI64(int64(math.Float64bits(value))) +} + +func (p *TBinaryProtocol) WriteString(value string) error { + e := p.WriteI32(int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.WriteString(value) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteBinary(value []byte) error { + e := p.WriteI32(int32(len(value))) + if e != nil { + return e + } + _, err := p.writer.Write(value) + return NewTProtocolException(err) +} + +/** + * Reading methods + */ + +func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { + size, e := p.ReadI32() + if e != nil { + return "", typeId, 0, NewTProtocolException(e) + } + if size < 0 { + typeId = TMessageType(size & 0x0ff) + version := int64(int64(size) & VERSION_MASK) + if version != VERSION_1 { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) + } + name, e = p.ReadString() + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + seqId, e = p.ReadI32() + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + return name, typeId, seqId, nil + } + if p.strictRead { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) + } + name, e2 := p.readStringBody(size) + if e2 != nil { + return name, typeId, seqId, e2 + } + b, e3 := p.ReadByte() + if e3 != nil { + return name, typeId, seqId, e3 + } + typeId = TMessageType(b) + seqId, e4 := p.ReadI32() + if e4 != nil { + return name, typeId, seqId, e4 + } + return name, typeId, seqId, nil +} + +func (p *TBinaryProtocol) ReadMessageEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) { + return +} + +func (p *TBinaryProtocol) ReadStructEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) { + t, err := p.ReadByte() + typeId = TType(t) + if err != nil { + return name, typeId, seqId, err + } + if t != STOP { + seqId, err = p.ReadI16() + } + return name, typeId, seqId, err +} + +func (p *TBinaryProtocol) 
ReadFieldEnd() error { + return nil +} + +var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) + +func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) { + k, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + kType = TType(k) + v, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + vType = TType(v) + size32, e := p.ReadI32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return kType, vType, size, nil +} + +func (p *TBinaryProtocol) ReadMapEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) { + b, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + return +} + +func (p *TBinaryProtocol) ReadListEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) { + b, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return elemType, size, nil +} + +func (p *TBinaryProtocol) ReadSetEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadBool() (bool, error) { + b, e := p.ReadByte() + v := true + if b != 1 { + v = false + } + return v, e +} + +func (p *TBinaryProtocol) ReadByte() (int8, error) { + v, err := p.trans.ReadByte() + return int8(v), err +} + +func (p *TBinaryProtocol) ReadI16() (value int16, err error) { + buf := p.buffer[0:2] + err = p.readAll(buf) + value = int16(binary.BigEndian.Uint16(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI32() (value int32, err error) { + buf := p.buffer[0:4] + err = p.readAll(buf) + value = int32(binary.BigEndian.Uint32(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI64() (value int64, err error) { + buf := p.buffer[0:8] + err = p.readAll(buf) + value = int64(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadDouble() (value float64, err error) { + buf := p.buffer[0:8] + err = p.readAll(buf) + value = math.Float64frombits(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadString() (value string, err error) { + size, e := p.ReadI32() + if e != nil { + return "", e + } + if size < 0 { + err = invalidDataLength + return + } + + return p.readStringBody(size) +} + +func (p *TBinaryProtocol) ReadBinary() ([]byte, error) { + size, e := p.ReadI32() + if e != nil { + return nil, e + } + if size < 0 { + return nil, invalidDataLength + } + if uint64(size) > p.trans.RemainingBytes() { + return nil, invalidDataLength + } + + isize := int(size) + buf := make([]byte, isize) + _, err := io.ReadFull(p.trans, buf) + return buf, NewTProtocolException(err) +} + +func (p *TBinaryProtocol) Flush() (err error) { + return NewTProtocolException(p.trans.Flush()) +} + +func (p *TBinaryProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TBinaryProtocol) Transport() TTransport { + return p.origTransport +} + +func (p *TBinaryProtocol) readAll(buf []byte) 
error { + _, err := io.ReadFull(p.reader, buf) + return NewTProtocolException(err) +} + +const readLimit = 32768 + +func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { + if size < 0 { + return "", nil + } + if uint64(size) > p.trans.RemainingBytes() { + return "", invalidDataLength + } + + var ( + buf bytes.Buffer + e error + b []byte + ) + + switch { + case int(size) <= len(p.buffer): + b = p.buffer[:size] // avoids allocation for small reads + case int(size) < readLimit: + b = make([]byte, size) + default: + b = make([]byte, readLimit) + } + + for size > 0 { + _, e = io.ReadFull(p.trans, b) + buf.Write(b) + if e != nil { + break + } + size -= readLimit + if size < readLimit && size > 0 { + b = b[:size] + } + } + return buf.String(), NewTProtocolException(e) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go new file mode 100644 index 000000000000..b9299f2fa13c --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go @@ -0,0 +1,815 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + COMPACT_PROTOCOL_ID = 0x082 + COMPACT_VERSION = 1 + COMPACT_VERSION_MASK = 0x1f + COMPACT_TYPE_MASK = 0x0E0 + COMPACT_TYPE_BITS = 0x07 + COMPACT_TYPE_SHIFT_AMOUNT = 5 +) + +type tCompactType byte + +const ( + COMPACT_BOOLEAN_TRUE = 0x01 + COMPACT_BOOLEAN_FALSE = 0x02 + COMPACT_BYTE = 0x03 + COMPACT_I16 = 0x04 + COMPACT_I32 = 0x05 + COMPACT_I64 = 0x06 + COMPACT_DOUBLE = 0x07 + COMPACT_BINARY = 0x08 + COMPACT_LIST = 0x09 + COMPACT_SET = 0x0A + COMPACT_MAP = 0x0B + COMPACT_STRUCT = 0x0C +) + +var ( + ttypeToCompactType map[TType]tCompactType +) + +func init() { + ttypeToCompactType = map[TType]tCompactType{ + STOP: STOP, + BOOL: COMPACT_BOOLEAN_TRUE, + BYTE: COMPACT_BYTE, + I16: COMPACT_I16, + I32: COMPACT_I32, + I64: COMPACT_I64, + DOUBLE: COMPACT_DOUBLE, + STRING: COMPACT_BINARY, + LIST: COMPACT_LIST, + SET: COMPACT_SET, + MAP: COMPACT_MAP, + STRUCT: COMPACT_STRUCT, + } +} + +type TCompactProtocolFactory struct{} + +func NewTCompactProtocolFactory() *TCompactProtocolFactory { + return &TCompactProtocolFactory{} +} + +func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTCompactProtocol(trans) +} + +type TCompactProtocol struct { + trans TRichTransport + origTransport TTransport + + // Used to keep track of the last field for the current and previous structs, + // so we can do the delta stuff. + lastField []int + lastFieldId int + + // If we encounter a boolean field begin, save the TField here so it can + // have the value incorporated. 
+ booleanFieldName string + booleanFieldId int16 + booleanFieldPending bool + + // If we read a field header, and it's a boolean field, save the boolean + // value here so that readBool can use it. + boolValue bool + boolValueIsNotNull bool + buffer [64]byte +} + +// Create a TCompactProtocol given a TTransport +func NewTCompactProtocol(trans TTransport) *TCompactProtocol { + p := &TCompactProtocol{origTransport: trans, lastField: []int{}} + if et, ok := trans.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(trans) + } + + return p + +} + +// +// Public Writing methods. +// + +// Write a message header to the wire. Compact Protocol messages contain the +// protocol version so we can migrate forwards in the future if need be. +func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { + err := p.writeByteDirect(COMPACT_PROTOCOL_ID) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) + if err != nil { + return NewTProtocolException(err) + } + _, err = p.writeVarint32(seqid) + if err != nil { + return NewTProtocolException(err) + } + e := p.WriteString(name) + return e + +} + +func (p *TCompactProtocol) WriteMessageEnd() error { return nil } + +// Write a struct begin. This doesn't actually put anything on the wire. We +// use it as an opportunity to put special placeholder markers on the field +// stack so we can get the field id deltas correct. +func (p *TCompactProtocol) WriteStructBegin(name string) error { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return nil +} + +// Write a struct end. This doesn't actually put anything on the wire. We use +// this as an opportunity to pop the last field from the current struct off +// of the field stack. +func (p *TCompactProtocol) WriteStructEnd() error { + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + if typeId == BOOL { + // we want to possibly include the value, so we'll wait. + p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true + return nil + } + _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF) + return NewTProtocolException(err) +} + +// The workhorse of writeFieldBegin. It has the option of doing a +// 'type override' of the type header. This is used specifically in the +// boolean field case. +func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) { + // short lastField = lastField_.pop(); + + // if there's a type override, use that. 
+ var typeToWrite byte + if typeOverride == 0xFF { + typeToWrite = byte(p.getCompactType(typeId)) + } else { + typeToWrite = typeOverride + } + // check if we can use delta encoding for the field id + fieldId := int(id) + written := 0 + if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { + // write them together + err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) + if err != nil { + return 0, err + } + } else { + // write them separate + err := p.writeByteDirect(typeToWrite) + if err != nil { + return 0, err + } + err = p.WriteI16(id) + written = 1 + 2 + if err != nil { + return 0, err + } + } + + p.lastFieldId = fieldId + // p.lastField.Push(field.id); + return written, nil +} + +func (p *TCompactProtocol) WriteFieldEnd() error { return nil } + +func (p *TCompactProtocol) WriteFieldStop() error { + err := p.writeByteDirect(STOP) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + if size == 0 { + err := p.writeByteDirect(0) + return NewTProtocolException(err) + } + _, err := p.writeVarint32(int32(size)) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapEnd() error { return nil } + +// Write a list header. +func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteListEnd() error { return nil } + +// Write a set header. +func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteSetEnd() error { return nil } + +func (p *TCompactProtocol) WriteBool(value bool) error { + v := byte(COMPACT_BOOLEAN_FALSE) + if value { + v = byte(COMPACT_BOOLEAN_TRUE) + } + if p.booleanFieldPending { + // we haven't written the field header yet + _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v) + p.booleanFieldPending = false + return NewTProtocolException(err) + } + // we're not part of a field, so just write the value. + err := p.writeByteDirect(v) + return NewTProtocolException(err) +} + +// Write a byte. Nothing to see here! +func (p *TCompactProtocol) WriteByte(value int8) error { + err := p.writeByteDirect(byte(value)) + return NewTProtocolException(err) +} + +// Write an I16 as a zigzag varint. +func (p *TCompactProtocol) WriteI16(value int16) error { + _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) + return NewTProtocolException(err) +} + +// Write an i32 as a zigzag varint. +func (p *TCompactProtocol) WriteI32(value int32) error { + _, err := p.writeVarint32(p.int32ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write an i64 as a zigzag varint. +func (p *TCompactProtocol) WriteI64(value int64) error { + _, err := p.writeVarint64(p.int64ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write a double to the wire as 8 bytes. +func (p *TCompactProtocol) WriteDouble(value float64) error { + buf := p.buffer[0:8] + binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) + _, err := p.trans.Write(buf) + return NewTProtocolException(err) +} + +// Write a string to the wire with a varint size preceding. 
+func (p *TCompactProtocol) WriteString(value string) error { + _, e := p.writeVarint32(int32(len(value))) + if e != nil { + return NewTProtocolException(e) + } + if len(value) > 0 { + } + _, e = p.trans.WriteString(value) + return e +} + +// Write a byte array, using a varint for the size. +func (p *TCompactProtocol) WriteBinary(bin []byte) error { + _, e := p.writeVarint32(int32(len(bin))) + if e != nil { + return NewTProtocolException(e) + } + if len(bin) > 0 { + _, e = p.trans.Write(bin) + return NewTProtocolException(e) + } + return nil +} + +// +// Reading methods. +// + +// Read a message header. +func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { + + protocolId, err := p.readByteDirect() + if err != nil { + return + } + + if protocolId != COMPACT_PROTOCOL_ID { + e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) + return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) + } + + versionAndType, err := p.readByteDirect() + if err != nil { + return + } + + version := versionAndType & COMPACT_VERSION_MASK + typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) + if version != COMPACT_VERSION { + e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) + err = NewTProtocolExceptionWithType(BAD_VERSION, e) + return + } + seqId, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + name, err = p.ReadString() + return +} + +func (p *TCompactProtocol) ReadMessageEnd() error { return nil } + +// Read a struct begin. There's nothing on the wire for this, but it is our +// opportunity to push a new struct begin marker onto the field stack. +func (p *TCompactProtocol) ReadStructBegin() (name string, err error) { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return +} + +// Doesn't actually consume any wire data, just removes the last field for +// this struct from the field stack. +func (p *TCompactProtocol) ReadStructEnd() error { + // consume the last field we read off the wire. + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +// Read a field header off the wire. +func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { + t, err := p.readByteDirect() + if err != nil { + return + } + + // if it's a stop, then we can return immediately, as the struct is over. + if (t & 0x0f) == STOP { + return "", STOP, 0, nil + } + + // mask off the 4 MSB of the type header. it could contain a field id delta. + modifier := int16((t & 0xf0) >> 4) + if modifier == 0 { + // not a delta. look ahead for the zigzag varint field id. + id, err = p.ReadI16() + if err != nil { + return + } + } else { + // has a delta. add the delta to the last read field id. + id = int16(p.lastFieldId) + modifier + } + typeId, e := p.getTType(tCompactType(t & 0x0f)) + if e != nil { + err = NewTProtocolException(e) + return + } + + // if this happens to be a boolean field, the value is encoded in the type + if p.isBoolType(t) { + // save the boolean value in a special instance variable. + p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) + p.boolValueIsNotNull = true + } + + // push the new field onto the field stack so we can keep the deltas going. + p.lastFieldId = int(id) + return +} + +func (p *TCompactProtocol) ReadFieldEnd() error { return nil } + +// Read a map header off the wire. 
If the size is zero, skip reading the key +// and value type. This means that 0-length maps will yield TMaps without the +// "correct" types. +func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { + size32, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + keyAndValueType := byte(STOP) + if size != 0 { + keyAndValueType, err = p.readByteDirect() + if err != nil { + return + } + } + keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) + valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) + return +} + +func (p *TCompactProtocol) ReadMapEnd() error { return nil } + +// Read a list header off the wire. If the list size is 0-14, the size will +// be packed into the element type header. If it's a longer list, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) { + size_and_type, err := p.readByteDirect() + if err != nil { + return + } + size = int((size_and_type >> 4) & 0x0f) + if size == 15 { + size2, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size2 < 0 { + err = invalidDataLength + return + } + size = int(size2) + } + elemType, e := p.getTType(tCompactType(size_and_type)) + if e != nil { + err = NewTProtocolException(e) + return + } + return +} + +func (p *TCompactProtocol) ReadListEnd() error { return nil } + +// Read a set header off the wire. If the set size is 0-14, the size will +// be packed into the element type header. If it's a longer set, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) { + return p.ReadListBegin() +} + +func (p *TCompactProtocol) ReadSetEnd() error { return nil } + +// Read a boolean off the wire. If this is a boolean field, the value should +// already have been read during readFieldBegin, so we'll just consume the +// pre-stored value. Otherwise, read a byte. +func (p *TCompactProtocol) ReadBool() (value bool, err error) { + if p.boolValueIsNotNull { + p.boolValueIsNotNull = false + return p.boolValue, nil + } + v, err := p.readByteDirect() + return v == COMPACT_BOOLEAN_TRUE, err +} + +// Read a single byte off the wire. Nothing interesting here. +func (p *TCompactProtocol) ReadByte() (int8, error) { + v, err := p.readByteDirect() + if err != nil { + return 0, NewTProtocolException(err) + } + return int8(v), err +} + +// Read an i16 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI16() (value int16, err error) { + v, err := p.ReadI32() + return int16(v), err +} + +// Read an i32 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI32() (value int32, err error) { + v, e := p.readVarint32() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt32(v) + return value, nil +} + +// Read an i64 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI64() (value int64, err error) { + v, e := p.readVarint64() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt64(v) + return value, nil +} + +// No magic here - just read a double off the wire. 
+func (p *TCompactProtocol) ReadDouble() (value float64, err error) { + longBits := p.buffer[0:8] + _, e := io.ReadFull(p.trans, longBits) + if e != nil { + return 0.0, NewTProtocolException(e) + } + return math.Float64frombits(p.bytesToUint64(longBits)), nil +} + +// Reads a []byte (via readBinary), and then UTF-8 decodes it. +func (p *TCompactProtocol) ReadString() (value string, err error) { + length, e := p.readVarint32() + if e != nil { + return "", NewTProtocolException(e) + } + if length < 0 { + return "", invalidDataLength + } + if uint64(length) > p.trans.RemainingBytes() { + return "", invalidDataLength + } + + if length == 0 { + return "", nil + } + var buf []byte + if length <= int32(len(p.buffer)) { + buf = p.buffer[0:length] + } else { + buf = make([]byte, length) + } + _, e = io.ReadFull(p.trans, buf) + return string(buf), NewTProtocolException(e) +} + +// Read a []byte from the wire. +func (p *TCompactProtocol) ReadBinary() (value []byte, err error) { + length, e := p.readVarint32() + if e != nil { + return nil, NewTProtocolException(e) + } + if length == 0 { + return []byte{}, nil + } + if length < 0 { + return nil, invalidDataLength + } + if uint64(length) > p.trans.RemainingBytes() { + return nil, invalidDataLength + } + + buf := make([]byte, length) + _, e = io.ReadFull(p.trans, buf) + return buf, NewTProtocolException(e) +} + +func (p *TCompactProtocol) Flush() (err error) { + return NewTProtocolException(p.trans.Flush()) +} + +func (p *TCompactProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TCompactProtocol) Transport() TTransport { + return p.origTransport +} + +// +// Internal writing methods +// + +// Abstract method for writing the start of lists and sets. List and sets on +// the wire differ only by the type indicator. +func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { + if size <= 14 { + return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) + } + err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) + if err != nil { + return 0, err + } + m, err := p.writeVarint32(int32(size)) + return 1 + m, err +} + +// Write an i32 as a varint. Results in 1-5 bytes on the wire. +// TODO(pomack): make a permanent buffer like writeVarint64? +func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { + i32buf := p.buffer[0:5] + idx := 0 + for { + if (n & ^0x7F) == 0 { + i32buf[idx] = byte(n) + idx++ + // p.writeByteDirect(byte(n)); + break + // return; + } else { + i32buf[idx] = byte((n & 0x7F) | 0x80) + idx++ + // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); + u := uint32(n) + n = int32(u >> 7) + } + } + return p.trans.Write(i32buf[0:idx]) +} + +// Write an i64 as a varint. Results in 1-10 bytes on the wire. +func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { + varint64out := p.buffer[0:10] + idx := 0 + for { + if (n & ^0x7F) == 0 { + varint64out[idx] = byte(n) + idx++ + break + } else { + varint64out[idx] = byte((n & 0x7F) | 0x80) + idx++ + u := uint64(n) + n = int64(u >> 7) + } + } + return p.trans.Write(varint64out[0:idx]) +} + +// Convert l into a zigzag long. This allows negative numbers to be +// represented compactly as a varint. +func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { + return (l << 1) ^ (l >> 63) +} + +// Convert l into a zigzag long. This allows negative numbers to be +// represented compactly as a varint. 
+func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { + return (n << 1) ^ (n >> 31) +} + +func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { + binary.LittleEndian.PutUint64(buf, n) +} + +func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { + binary.LittleEndian.PutUint64(buf, uint64(n)) +} + +// Writes a byte without any possibility of all that field header nonsense. +// Used internally by other writing methods that know they need to write a byte. +func (p *TCompactProtocol) writeByteDirect(b byte) error { + return p.trans.WriteByte(b) +} + +// Writes a byte without any possibility of all that field header nonsense. +func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) { + return 1, p.writeByteDirect(byte(n)) +} + +// +// Internal reading methods +// + +// Read an i32 from the wire as a varint. The MSB of each byte is set +// if there is another byte to follow. This can read up to 5 bytes. +func (p *TCompactProtocol) readVarint32() (int32, error) { + // if the wire contains the right stuff, this will just truncate the i64 we + // read and get us the right sign. + v, err := p.readVarint64() + return int32(v), err +} + +// Read an i64 from the wire as a proper varint. The MSB of each byte is set +// if there is another byte to follow. This can read up to 10 bytes. +func (p *TCompactProtocol) readVarint64() (int64, error) { + shift := uint(0) + result := int64(0) + for { + b, err := p.readByteDirect() + if err != nil { + return 0, err + } + result |= int64(b&0x7f) << shift + if (b & 0x80) != 0x80 { + break + } + shift += 7 + } + return result, nil +} + +// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. +func (p *TCompactProtocol) readByteDirect() (byte, error) { + return p.trans.ReadByte() +} + +// +// encoding helpers +// + +// Convert from zigzag int to int. +func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { + u := uint32(n) + return int32(u>>1) ^ -(n & 1) +} + +// Convert from zigzag long to long. +func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { + u := uint64(n) + return int64(u>>1) ^ -(n & 1) +} + +// Note that it's important that the mask bytes are long literals, +// otherwise they'll default to ints, and when you shift an int left 56 bits, +// you just get a messed up int. +func (p *TCompactProtocol) bytesToInt64(b []byte) int64 { + return int64(binary.LittleEndian.Uint64(b)) +} + +// Note that it's important that the mask bytes are long literals, +// otherwise they'll default to ints, and when you shift an int left 56 bits, +// you just get a messed up int. +func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { + return binary.LittleEndian.Uint64(b) +} + +// +// type testing and converting +// + +func (p *TCompactProtocol) isBoolType(b byte) bool { + return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE +} + +// Given a tCompactType constant, convert it to its corresponding +// TType value. 
+func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { + switch byte(t) & 0x0f { + case STOP: + return STOP, nil + case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: + return BOOL, nil + case COMPACT_BYTE: + return BYTE, nil + case COMPACT_I16: + return I16, nil + case COMPACT_I32: + return I32, nil + case COMPACT_I64: + return I64, nil + case COMPACT_DOUBLE: + return DOUBLE, nil + case COMPACT_BINARY: + return STRING, nil + case COMPACT_LIST: + return LIST, nil + case COMPACT_SET: + return SET, nil + case COMPACT_MAP: + return MAP, nil + case COMPACT_STRUCT: + return STRUCT, nil + } + return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f)) +} + +// Given a TType value, find the appropriate TCompactProtocol.Types constant. +func (p *TCompactProtocol) getCompactType(t TType) tCompactType { + return ttypeToCompactType[t] +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go new file mode 100644 index 000000000000..ea8d6f66114c --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" +) + +// Generic Thrift exception +type TException interface { + error +} + +// Prepends additional information to an error without losing the Thrift exception interface +func PrependError(prepend string, err error) error { + if t, ok := err.(TTransportException); ok { + return NewTTransportException(t.TypeId(), prepend+t.Error()) + } + if t, ok := err.(TProtocolException); ok { + return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error())) + } + if t, ok := err.(TApplicationException); ok { + return NewTApplicationException(t.TypeId(), prepend+t.Error()) + } + + return errors.New(prepend + err.Error()) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go new file mode 100644 index 000000000000..b62fd56f0634 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" +) + +// Memory buffer-based implementation of the TTransport interface. +type TMemoryBuffer struct { + *bytes.Buffer + size int +} + +type TMemoryBufferTransportFactory struct { + size int +} + +func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport { + if trans != nil { + t, ok := trans.(*TMemoryBuffer) + if ok && t.size > 0 { + return NewTMemoryBufferLen(t.size) + } + } + return NewTMemoryBufferLen(p.size) +} + +func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { + return &TMemoryBufferTransportFactory{size: size} +} + +func NewTMemoryBuffer() *TMemoryBuffer { + return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} +} + +func NewTMemoryBufferLen(size int) *TMemoryBuffer { + buf := make([]byte, 0, size) + return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} +} + +func (p *TMemoryBuffer) IsOpen() bool { + return true +} + +func (p *TMemoryBuffer) Open() error { + return nil +} + +func (p *TMemoryBuffer) Close() error { + p.Buffer.Reset() + return nil +} + +// Flushing a memory buffer is a no-op +func (p *TMemoryBuffer) Flush() error { + return nil +} + +func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { + return uint64(p.Buffer.Len()) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go new file mode 100644 index 000000000000..25ab2e98a256 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Message type constants in the Thrift protocol. +type TMessageType int32 + +const ( + INVALID_TMESSAGE_TYPE TMessageType = 0 + CALL TMessageType = 1 + REPLY TMessageType = 2 + EXCEPTION TMessageType = 3 + ONEWAY TMessageType = 4 +) diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go new file mode 100644 index 000000000000..aa8daa9b54f9 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "math" + "strconv" +) + +type Numeric interface { + Int64() int64 + Int32() int32 + Int16() int16 + Byte() byte + Int() int + Float64() float64 + Float32() float32 + String() string + isNull() bool +} + +type numeric struct { + iValue int64 + dValue float64 + sValue string + isNil bool +} + +var ( + INFINITY Numeric + NEGATIVE_INFINITY Numeric + NAN Numeric + ZERO Numeric + NUMERIC_NULL Numeric +) + +func NewNumericFromDouble(dValue float64) Numeric { + if math.IsInf(dValue, 1) { + return INFINITY + } + if math.IsInf(dValue, -1) { + return NEGATIVE_INFINITY + } + if math.IsNaN(dValue) { + return NAN + } + iValue := int64(dValue) + sValue := strconv.FormatFloat(dValue, 'g', 10, 64) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI64(iValue int64) Numeric { + dValue := float64(iValue) + sValue := string(iValue) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI32(iValue int32) Numeric { + dValue := float64(iValue) + sValue := string(iValue) + isNil := false + return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromString(sValue string) Numeric { + if sValue == INFINITY.String() { + return INFINITY + } + if sValue == NEGATIVE_INFINITY.String() { + return NEGATIVE_INFINITY + } + if sValue == NAN.String() { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + isNil := len(sValue) == 0 + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromJSONString(sValue string, isNull bool) Numeric { + if isNull { + return NewNullNumeric() + } + if sValue == JSON_INFINITY { + return INFINITY + } + if sValue == JSON_NEGATIVE_INFINITY { + return NEGATIVE_INFINITY + } + if sValue == JSON_NAN { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} +} + +func NewNullNumeric() Numeric { + return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} +} + +func (p *numeric) Int64() int64 { + return p.iValue +} + +func (p *numeric) Int32() int32 { + return int32(p.iValue) +} + +func (p *numeric) Int16() int16 { + return int16(p.iValue) +} + +func (p *numeric) Byte() byte { + return byte(p.iValue) +} + +func (p *numeric) Int() int { + return int(p.iValue) +} + +func (p *numeric) Float64() float64 { + return p.dValue +} + +func (p *numeric) Float32() float32 { + return float32(p.dValue) +} + +func (p *numeric) String() string { + return p.sValue +} + +func (p *numeric) isNull() bool { + return p.isNil +} + +func init() { + INFINITY = 
&numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} + NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} + NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} + ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} + NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go b/vendor/github.com/uber/jaeger-client-go/thrift/processor.go new file mode 100644 index 000000000000..ca0d3faf20ee --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/processor.go @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// A processor is a generic object which operates upon an input stream and +// writes to some output stream. +type TProcessor interface { + Process(in, out TProtocol) (bool, TException) +} + +type TProcessorFunction interface { + Process(seqId int32, in, out TProtocol) (bool, TException) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go new file mode 100644 index 000000000000..45fa202e741c --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "errors" +) + +const ( + VERSION_MASK = 0xffff0000 + VERSION_1 = 0x80010000 +) + +type TProtocol interface { + WriteMessageBegin(name string, typeId TMessageType, seqid int32) error + WriteMessageEnd() error + WriteStructBegin(name string) error + WriteStructEnd() error + WriteFieldBegin(name string, typeId TType, id int16) error + WriteFieldEnd() error + WriteFieldStop() error + WriteMapBegin(keyType TType, valueType TType, size int) error + WriteMapEnd() error + WriteListBegin(elemType TType, size int) error + WriteListEnd() error + WriteSetBegin(elemType TType, size int) error + WriteSetEnd() error + WriteBool(value bool) error + WriteByte(value int8) error + WriteI16(value int16) error + WriteI32(value int32) error + WriteI64(value int64) error + WriteDouble(value float64) error + WriteString(value string) error + WriteBinary(value []byte) error + + ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) + ReadMessageEnd() error + ReadStructBegin() (name string, err error) + ReadStructEnd() error + ReadFieldBegin() (name string, typeId TType, id int16, err error) + ReadFieldEnd() error + ReadMapBegin() (keyType TType, valueType TType, size int, err error) + ReadMapEnd() error + ReadListBegin() (elemType TType, size int, err error) + ReadListEnd() error + ReadSetBegin() (elemType TType, size int, err error) + ReadSetEnd() error + ReadBool() (value bool, err error) + ReadByte() (value int8, err error) + ReadI16() (value int16, err error) + ReadI32() (value int32, err error) + ReadI64() (value int64, err error) + ReadDouble() (value float64, err error) + ReadString() (value string, err error) + ReadBinary() (value []byte, err error) + + Skip(fieldType TType) (err error) + Flush() (err error) + + Transport() TTransport +} + +// The maximum recursive depth the skip() function will traverse +const DEFAULT_RECURSION_DEPTH = 64 + +// Skips over the next data element from the provided input TProtocol object. +func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) { + return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH) +} + +// Skips over the next data element from the provided input TProtocol object. 
+func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) { + + if maxDepth <= 0 { + return NewTProtocolExceptionWithType( DEPTH_LIMIT, errors.New("Depth limit exceeded")) + } + + switch fieldType { + case STOP: + return + case BOOL: + _, err = self.ReadBool() + return + case BYTE: + _, err = self.ReadByte() + return + case I16: + _, err = self.ReadI16() + return + case I32: + _, err = self.ReadI32() + return + case I64: + _, err = self.ReadI64() + return + case DOUBLE: + _, err = self.ReadDouble() + return + case STRING: + _, err = self.ReadString() + return + case STRUCT: + if _, err = self.ReadStructBegin(); err != nil { + return err + } + for { + _, typeId, _, _ := self.ReadFieldBegin() + if typeId == STOP { + break + } + err := Skip(self, typeId, maxDepth-1) + if err != nil { + return err + } + self.ReadFieldEnd() + } + return self.ReadStructEnd() + case MAP: + keyType, valueType, size, err := self.ReadMapBegin() + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(self, keyType, maxDepth-1) + if err != nil { + return err + } + self.Skip(valueType) + } + return self.ReadMapEnd() + case SET: + elemType, size, err := self.ReadSetBegin() + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadSetEnd() + case LIST: + elemType, size, err := self.ReadListBegin() + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadListEnd() + } + return nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go new file mode 100644 index 000000000000..6e357ee890df --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "encoding/base64" +) + +// Thrift Protocol exception +type TProtocolException interface { + TException + TypeId() int +} + +const ( + UNKNOWN_PROTOCOL_EXCEPTION = 0 + INVALID_DATA = 1 + NEGATIVE_SIZE = 2 + SIZE_LIMIT = 3 + BAD_VERSION = 4 + NOT_IMPLEMENTED = 5 + DEPTH_LIMIT = 6 +) + +type tProtocolException struct { + typeId int + message string +} + +func (p *tProtocolException) TypeId() int { + return p.typeId +} + +func (p *tProtocolException) String() string { + return p.message +} + +func (p *tProtocolException) Error() string { + return p.message +} + +func NewTProtocolException(err error) TProtocolException { + if err == nil { + return nil + } + if e,ok := err.(TProtocolException); ok { + return e + } + if _, ok := err.(base64.CorruptInputError); ok { + return &tProtocolException{INVALID_DATA, err.Error()} + } + return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()} +} + +func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { + if err == nil { + return nil + } + return &tProtocolException{errType, err.Error()} +} + diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go new file mode 100644 index 000000000000..c40f796d886a --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Factory interface for constructing protocol instances. +type TProtocolFactory interface { + GetProtocol(trans TTransport) TProtocol +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go new file mode 100644 index 000000000000..8e296a99b5f9 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "io" + +type RichTransport struct { + TTransport +} + +// Wraps Transport to provide TRichTransport interface +func NewTRichTransport(trans TTransport) *RichTransport { + return &RichTransport{trans} +} + +func (r *RichTransport) ReadByte() (c byte, err error) { + return readByte(r.TTransport) +} + +func (r *RichTransport) WriteByte(c byte) error { + return writeByte(r.TTransport, c) +} + +func (r *RichTransport) WriteString(s string) (n int, err error) { + return r.Write([]byte(s)) +} + +func (r *RichTransport) RemainingBytes() (num_bytes uint64) { + return r.TTransport.RemainingBytes() +} + +func readByte(r io.Reader) (c byte, err error) { + v := [1]byte{0} + n, err := r.Read(v[0:1]) + if n > 0 && (err == nil || err == io.EOF) { + return v[0], nil + } + if n > 0 && err != nil { + return v[0], err + } + if err != nil { + return 0, err + } + return v[0], nil +} + +func writeByte(w io.Writer, c byte) error { + v := [1]byte{c} + _, err := w.Write(v[0:1]) + return err +} + diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go new file mode 100644 index 000000000000..771222999091 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +type TSerializer struct { + Transport *TMemoryBuffer + Protocol TProtocol +} + +type TStruct interface { + Write(p TProtocol) error + Read(p TProtocol) error +} + +func NewTSerializer() *TSerializer { + transport := NewTMemoryBufferLen(1024) + protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) + + return &TSerializer{ + transport, + protocol} +} + +func (t *TSerializer) WriteString(msg TStruct) (s string, err error) { + t.Transport.Reset() + + if err = msg.Write(t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(); err != nil { + return + } + if err = t.Transport.Flush(); err != nil { + return + } + + return t.Transport.String(), nil +} + +func (t *TSerializer) Write(msg TStruct) (b []byte, err error) { + t.Transport.Reset() + + if err = msg.Write(t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(); err != nil { + return + } + + if err = t.Transport.Flush(); err != nil { + return + } + + b = append(b, t.Transport.Bytes()...) 
+ return +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go new file mode 100644 index 000000000000..412a482d055a --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go @@ -0,0 +1,1337 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "math" + "strconv" +) + +type _ParseContext int + +const ( + _CONTEXT_IN_TOPLEVEL _ParseContext = 1 + _CONTEXT_IN_LIST_FIRST _ParseContext = 2 + _CONTEXT_IN_LIST _ParseContext = 3 + _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4 + _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5 + _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6 +) + +func (p _ParseContext) String() string { + switch p { + case _CONTEXT_IN_TOPLEVEL: + return "TOPLEVEL" + case _CONTEXT_IN_LIST_FIRST: + return "LIST-FIRST" + case _CONTEXT_IN_LIST: + return "LIST" + case _CONTEXT_IN_OBJECT_FIRST: + return "OBJECT-FIRST" + case _CONTEXT_IN_OBJECT_NEXT_KEY: + return "OBJECT-NEXT-KEY" + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + return "OBJECT-NEXT-VALUE" + } + return "UNKNOWN-PARSE-CONTEXT" +} + +// JSON protocol implementation for thrift. +// +// This protocol produces/consumes a simple output format +// suitable for parsing by scripting languages. It should not be +// confused with the full-featured TJSONProtocol. 
+// +type TSimpleJSONProtocol struct { + trans TTransport + + parseContextStack []int + dumpContext []int + + writer *bufio.Writer + reader *bufio.Reader +} + +// Constructor +func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { + v := &TSimpleJSONProtocol{trans: t, + writer: bufio.NewWriter(t), + reader: bufio.NewReader(t), + } + v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) + v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) + return v +} + +// Factory +type TSimpleJSONProtocolFactory struct{} + +func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTSimpleJSONProtocol(trans) +} + +func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory { + return &TSimpleJSONProtocolFactory{} +} + +var ( + JSON_COMMA []byte + JSON_COLON []byte + JSON_LBRACE []byte + JSON_RBRACE []byte + JSON_LBRACKET []byte + JSON_RBRACKET []byte + JSON_QUOTE byte + JSON_QUOTE_BYTES []byte + JSON_NULL []byte + JSON_TRUE []byte + JSON_FALSE []byte + JSON_INFINITY string + JSON_NEGATIVE_INFINITY string + JSON_NAN string + JSON_INFINITY_BYTES []byte + JSON_NEGATIVE_INFINITY_BYTES []byte + JSON_NAN_BYTES []byte + json_nonbase_map_elem_bytes []byte +) + +func init() { + JSON_COMMA = []byte{','} + JSON_COLON = []byte{':'} + JSON_LBRACE = []byte{'{'} + JSON_RBRACE = []byte{'}'} + JSON_LBRACKET = []byte{'['} + JSON_RBRACKET = []byte{']'} + JSON_QUOTE = '"' + JSON_QUOTE_BYTES = []byte{'"'} + JSON_NULL = []byte{'n', 'u', 'l', 'l'} + JSON_TRUE = []byte{'t', 'r', 'u', 'e'} + JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} + JSON_INFINITY = "Infinity" + JSON_NEGATIVE_INFINITY = "-Infinity" + JSON_NAN = "NaN" + JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} + JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} + JSON_NAN_BYTES = []byte{'N', 'a', 'N'} + json_nonbase_map_elem_bytes = []byte{']', ',', '['} +} + +func jsonQuote(s string) string { + b, _ := json.Marshal(s) + s1 := string(b) + return s1 +} + +func jsonUnquote(s string) (string, bool) { + s1 := new(string) + err := json.Unmarshal([]byte(s), s1) + return *s1, err == nil +} + +func mismatch(expected, actual string) error { + return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) +} + +func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { + p.resetContextStack() // THRIFT-3735 + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteString(name); e != nil { + return e + } + if e := p.WriteByte(int8(typeId)); e != nil { + return e + } + if e := p.WriteI32(seqId); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteMessageEnd() error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error { + if e := p.OutputObjectBegin(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteStructEnd() error { + return p.OutputObjectEnd() +} + +func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + if e := p.WriteString(name); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteFieldEnd() error { + //return p.OutputListEnd() + return nil +} + +func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil } + +func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + if e := 
p.WriteByte(int8(keyType)); e != nil { + return e + } + if e := p.WriteByte(int8(valueType)); e != nil { + return e + } + return p.WriteI32(int32(size)) +} + +func (p *TSimpleJSONProtocol) WriteMapEnd() error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TSimpleJSONProtocol) WriteListEnd() error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TSimpleJSONProtocol) WriteSetEnd() error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteBool(b bool) error { + return p.OutputBool(b) +} + +func (p *TSimpleJSONProtocol) WriteByte(b int8) error { + return p.WriteI32(int32(b)) +} + +func (p *TSimpleJSONProtocol) WriteI16(v int16) error { + return p.WriteI32(int32(v)) +} + +func (p *TSimpleJSONProtocol) WriteI32(v int32) error { + return p.OutputI64(int64(v)) +} + +func (p *TSimpleJSONProtocol) WriteI64(v int64) error { + return p.OutputI64(int64(v)) +} + +func (p *TSimpleJSONProtocol) WriteDouble(v float64) error { + return p.OutputF64(v) +} + +func (p *TSimpleJSONProtocol) WriteString(v string) error { + return p.OutputString(v) +} + +func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error { + // JSON library only takes in a string, + // not an arbitrary byte array, to ensure bytes are transmitted + // efficiently we must convert this into a valid JSON string + // therefore we use base64 encoding to avoid excessive escaping/quoting + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + writer := base64.NewEncoder(base64.StdEncoding, p.writer) + if _, e := writer.Write(v); e != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + return NewTProtocolException(e) + } + if e := writer.Close(); e != nil { + return NewTProtocolException(e) + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +// Reading methods. 
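+//
+// Note that simple JSON is a write-oriented format: as the comment in
+// ReadFieldBegin below points out, it is not meant to be read back into
+// Thrift, so these readers only understand the framing emitted by the
+// corresponding writers above.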
+func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { + p.resetContextStack() // THRIFT-3735 + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, typeId, seqId, err + } + if name, err = p.ReadString(); err != nil { + return name, typeId, seqId, err + } + bTypeId, err := p.ReadByte() + typeId = TMessageType(bTypeId) + if err != nil { + return name, typeId, seqId, err + } + if seqId, err = p.ReadI32(); err != nil { + return name, typeId, seqId, err + } + return name, typeId, seqId, nil +} + +func (p *TSimpleJSONProtocol) ReadMessageEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) { + _, err = p.ParseObjectStart() + return "", err +} + +func (p *TSimpleJSONProtocol) ReadStructEnd() error { + return p.ParseObjectEnd() +} + +func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { + if err := p.ParsePreValue(); err != nil { + return "", STOP, 0, err + } + b, _ := p.reader.Peek(1) + if len(b) > 0 { + switch b[0] { + case JSON_RBRACE[0]: + return "", STOP, 0, nil + case JSON_QUOTE: + p.reader.ReadByte() + name, err := p.ParseStringBody() + // simplejson is not meant to be read back into thrift + // - see http://wiki.apache.org/thrift/ThriftUsageJava + // - use JSON instead + if err != nil { + return name, STOP, 0, err + } + return name, STOP, -1, p.ParsePostValue() + /* + if err = p.ParsePostValue(); err != nil { + return name, STOP, 0, err + } + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, STOP, 0, err + } + bType, err := p.ReadByte() + thetype := TType(bType) + if err != nil { + return name, thetype, 0, err + } + id, err := p.ReadI16() + return name, thetype, id, err + */ + } + e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) + return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return "", STOP, 0, NewTProtocolException(io.EOF) +} + +func (p *TSimpleJSONProtocol) ReadFieldEnd() error { + return nil + //return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, VOID, 0, e + } + + // read keyType + bKeyType, e := p.ReadByte() + keyType = TType(bKeyType) + if e != nil { + return keyType, valueType, size, e + } + + // read valueType + bValueType, e := p.ReadByte() + valueType = TType(bValueType) + if e != nil { + return keyType, valueType, size, e + } + + // read size + iSize, err := p.ReadI64() + size = int(iSize) + return keyType, valueType, size, err +} + +func (p *TSimpleJSONProtocol) ReadMapEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TSimpleJSONProtocol) ReadListEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TSimpleJSONProtocol) ReadSetEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadBool() (bool, error) { + var value bool + + if err := p.ParsePreValue(); err != nil { + return value, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 { + switch f[0] { + case JSON_TRUE[0]: + b := make([]byte, len(JSON_TRUE)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == 
string(JSON_TRUE) { + value = true + } else { + e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + break + case JSON_FALSE[0]: + b := make([]byte, len(JSON_FALSE)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_FALSE) { + value = false + } else { + e := fmt.Errorf("Expected \"false\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + break + case JSON_NULL[0]: + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_NULL) { + value = false + } else { + e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + default: + e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + return value, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadByte() (int8, error) { + v, err := p.ReadI64() + return int8(v), err +} + +func (p *TSimpleJSONProtocol) ReadI16() (int16, error) { + v, err := p.ReadI64() + return int16(v), err +} + +func (p *TSimpleJSONProtocol) ReadI32() (int32, error) { + v, err := p.ReadI64() + return int32(v), err +} + +func (p *TSimpleJSONProtocol) ReadI64() (int64, error) { + v, _, err := p.ParseI64() + return v, err +} + +func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) { + v, _, err := p.ParseF64() + return v, err +} + +func (p *TSimpleJSONProtocol) ReadString() (string, error) { + var v string + if err := p.ParsePreValue(); err != nil { + return v, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseStringBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) { + var v []byte + if err := p.ParsePreValue(); err != nil { + return nil, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseBase64EncodedBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + + return v, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) Flush() (err error) { + return NewTProtocolException(p.writer.Flush()) +} + +func (p 
*TSimpleJSONProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TSimpleJSONProtocol) Transport() TTransport { + return p.trans +} + +func (p *TSimpleJSONProtocol) OutputPreValue() error { + cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) + switch cxt { + case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: + if _, e := p.write(JSON_COMMA); e != nil { + return NewTProtocolException(e) + } + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + if _, e := p.write(JSON_COLON); e != nil { + return NewTProtocolException(e) + } + break + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputPostValue() error { + cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) + switch cxt { + case _CONTEXT_IN_LIST_FIRST: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST)) + break + case _CONTEXT_IN_OBJECT_FIRST: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) + break + case _CONTEXT_IN_OBJECT_NEXT_KEY: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) + break + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputBool(value bool) error { + if e := p.OutputPreValue(); e != nil { + return e + } + var v string + if value { + v = string(JSON_TRUE) + } else { + v = string(JSON_FALSE) + } + switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = jsonQuote(v) + default: + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputNull() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_NULL); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputF64(value float64) error { + if e := p.OutputPreValue(); e != nil { + return e + } + var v string + if math.IsNaN(value) { + v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) + } else if math.IsInf(value, 1) { + v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) + } else if math.IsInf(value, -1) { + v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) + } else { + v = strconv.FormatFloat(value, 'g', -1, 64) + switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = string(JSON_QUOTE) + v + string(JSON_QUOTE) + default: + } + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputI64(value int64) error { + if e := p.OutputPreValue(); e != nil { + return e + } + v := strconv.FormatInt(value, 10) + switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = jsonQuote(v) + default: + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputString(s string) error { + if e := p.OutputPreValue(); e != nil { + return e + } + if e := p.OutputStringData(jsonQuote(s)); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) 
OutputStringData(s string) error { + _, e := p.write([]byte(s)) + return NewTProtocolException(e) +} + +func (p *TSimpleJSONProtocol) OutputObjectBegin() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_LBRACE); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST)) + return nil +} + +func (p *TSimpleJSONProtocol) OutputObjectEnd() error { + if _, e := p.write(JSON_RBRACE); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + if e := p.OutputPostValue(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputListBegin() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_LBRACKET); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST)) + return nil +} + +func (p *TSimpleJSONProtocol) OutputListEnd() error { + if _, e := p.write(JSON_RBRACKET); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + if e := p.OutputPostValue(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteByte(int8(elemType)); e != nil { + return e + } + if e := p.WriteI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) ParsePreValue() error { + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + b, _ := p.reader.Peek(1) + switch cxt { + case _CONTEXT_IN_LIST: + if len(b) > 0 { + switch b[0] { + case JSON_RBRACKET[0]: + return nil + case JSON_COMMA[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + break + case _CONTEXT_IN_OBJECT_NEXT_KEY: + if len(b) > 0 { + switch b[0] { + case JSON_RBRACE[0]: + return nil + case JSON_COMMA[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + if len(b) > 0 { + switch b[0] { + case JSON_COLON[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + break + } + return nil +} + +func (p *TSimpleJSONProtocol) ParsePostValue() error { + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + switch cxt { + case _CONTEXT_IN_LIST_FIRST: + p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST)) + break + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + p.parseContextStack = 
p.parseContextStack[:len(p.parseContextStack)-1] + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) + break + } + return nil +} + +func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error { + for { + b, _ := p.reader.Peek(1) + if len(b) < 1 { + return nil + } + switch b[0] { + case ' ', '\r', '\n', '\t': + p.reader.ReadByte() + continue + default: + break + } + break + } + return nil +} + +func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { + line, err := p.reader.ReadString(JSON_QUOTE) + if err != nil { + return "", NewTProtocolException(err) + } + l := len(line) + // count number of escapes to see if we need to keep going + i := 1 + for ; i < l; i++ { + if line[l-i-1] != '\\' { + break + } + } + if i&0x01 == 1 { + v, ok := jsonUnquote(string(JSON_QUOTE) + line) + if !ok { + return "", NewTProtocolException(err) + } + return v, nil + } + s, err := p.ParseQuotedStringBody() + if err != nil { + return "", NewTProtocolException(err) + } + str := string(JSON_QUOTE) + line + s + v, ok := jsonUnquote(str) + if !ok { + e := fmt.Errorf("Unable to parse as JSON string %s", str) + return "", NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, nil +} + +func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { + line, err := p.reader.ReadString(JSON_QUOTE) + if err != nil { + return "", NewTProtocolException(err) + } + l := len(line) + // count number of escapes to see if we need to keep going + i := 1 + for ; i < l; i++ { + if line[l-i-1] != '\\' { + break + } + } + if i&0x01 == 1 { + return line, nil + } + s, err := p.ParseQuotedStringBody() + if err != nil { + return "", NewTProtocolException(err) + } + v := line + s + return v, nil +} + +func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { + line, err := p.reader.ReadBytes(JSON_QUOTE) + if err != nil { + return line, NewTProtocolException(err) + } + line2 := line[0 : len(line)-1] + l := len(line2) + if (l % 4) != 0 { + pad := 4 - (l % 4) + fill := [...]byte{'=', '=', '='} + line2 = append(line2, fill[:pad]...) 
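+ // refresh the length so DecodedLen below sizes the output for the padded input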
+ l = len(line2) + } + output := make([]byte, base64.StdEncoding.DecodedLen(l)) + n, err := base64.StdEncoding.Decode(output, line2) + return output[0:n], NewTProtocolException(err) +} + +func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { + if err := p.ParsePreValue(); err != nil { + return 0, false, err + } + var value int64 + var isnull bool + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + isnull = true + } else { + num, err := p.readNumeric() + isnull = (num == nil) + if !isnull { + value = num.Int64() + } + if err != nil { + return value, isnull, err + } + } + return value, isnull, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { + if err := p.ParsePreValue(); err != nil { + return 0, false, err + } + var value float64 + var isnull bool + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + isnull = true + } else { + num, err := p.readNumeric() + isnull = (num == nil) + if !isnull { + value = num.Float64() + } + if err != nil { + return value, isnull, err + } + } + return value, isnull, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { + if err := p.ParsePreValue(); err != nil { + return false, err + } + var b []byte + b, err := p.reader.Peek(1) + if err != nil { + return false, err + } + if len(b) > 0 && b[0] == JSON_LBRACE[0] { + p.reader.ReadByte() + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST)) + return false, nil + } else if p.safePeekContains(JSON_NULL) { + return true, nil + } + e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) + return false, NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +func (p *TSimpleJSONProtocol) ParseObjectEnd() error { + if isNull, err := p.readIfNull(); isNull || err != nil { + return err + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { + e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + line, err := p.reader.ReadString(JSON_RBRACE[0]) + if err != nil { + return NewTProtocolException(err) + } + for _, char := range line { + switch char { + default: + e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + case ' ', '\n', '\r', '\t', '}': + break + } + } + p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + return p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { + if e := p.ParsePreValue(); e != nil { + return false, e + } + var b []byte + b, err = p.reader.Peek(1) + if err != nil { + return false, err + } + if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST)) + p.reader.ReadByte() + isNull = false + } else if p.safePeekContains(JSON_NULL) { + isNull = true + } else { + err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) + } + return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) +} + +func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + bElemType, err := p.ReadByte() + elemType = TType(bElemType) + if err != nil { + return elemType, size, err + } + nSize, err2 := p.ReadI64() 
+ size = int(nSize) + return elemType, size, err2 +} + +func (p *TSimpleJSONProtocol) ParseListEnd() error { + if isNull, err := p.readIfNull(); isNull || err != nil { + return err + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + if cxt != _CONTEXT_IN_LIST { + e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + line, err := p.reader.ReadString(JSON_RBRACKET[0]) + if err != nil { + return NewTProtocolException(err) + } + for _, char := range line { + switch char { + default: + e := fmt.Errorf("Expecting end of list \"]\", but found: \"%s\"", line) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): + break + } + } + p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL { + return nil + } + return p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) { + e := p.readNonSignificantWhitespace() + if e != nil { + return nil, VOID, NewTProtocolException(e) + } + b, e := p.reader.Peek(1) + if len(b) > 0 { + c := b[0] + switch c { + case JSON_NULL[0]: + buf := make([]byte, len(JSON_NULL)) + _, e := p.reader.Read(buf) + if e != nil { + return nil, VOID, NewTProtocolException(e) + } + if string(JSON_NULL) != string(buf) { + e = mismatch(string(JSON_NULL), string(buf)) + return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return nil, VOID, nil + case JSON_QUOTE: + p.reader.ReadByte() + v, e := p.ParseStringBody() + if e != nil { + return v, UTF8, NewTProtocolException(e) + } + if v == JSON_INFINITY { + return INFINITY, DOUBLE, nil + } else if v == JSON_NEGATIVE_INFINITY { + return NEGATIVE_INFINITY, DOUBLE, nil + } else if v == JSON_NAN { + return NAN, DOUBLE, nil + } + return v, UTF8, nil + case JSON_TRUE[0]: + buf := make([]byte, len(JSON_TRUE)) + _, e := p.reader.Read(buf) + if e != nil { + return true, BOOL, NewTProtocolException(e) + } + if string(JSON_TRUE) != string(buf) { + e := mismatch(string(JSON_TRUE), string(buf)) + return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return true, BOOL, nil + case JSON_FALSE[0]: + buf := make([]byte, len(JSON_FALSE)) + _, e := p.reader.Read(buf) + if e != nil { + return false, BOOL, NewTProtocolException(e) + } + if string(JSON_FALSE) != string(buf) { + e := mismatch(string(JSON_FALSE), string(buf)) + return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return false, BOOL, nil + case JSON_LBRACKET[0]: + _, e := p.reader.ReadByte() + return make([]interface{}, 0), LIST, NewTProtocolException(e) + case JSON_LBRACE[0]: + _, e := p.reader.ReadByte() + return make(map[string]interface{}), STRUCT, NewTProtocolException(e) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]: + // assume numeric + v, e := p.readNumeric() + return v, DOUBLE, e + default: + e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c)) + return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + e = fmt.Errorf("Cannot read a single element while parsing JSON.") + return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) + +} + +func (p *TSimpleJSONProtocol) readIfNull() (bool, error) { + cont := true + for cont { + b, _ := p.reader.Peek(1) + if len(b) < 1 { + return false, nil + } 
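+ // skip insignificant whitespace; an 'n' ends the scan so the "null" literal
+ // can be verified below, and any other byte means the value is not null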
+ switch b[0] { + default: + return false, nil + case JSON_NULL[0]: + cont = false + break + case ' ', '\n', '\r', '\t': + p.reader.ReadByte() + break + } + } + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + return true, nil + } + return false, nil +} + +func (p *TSimpleJSONProtocol) readQuoteIfNext() { + b, _ := p.reader.Peek(1) + if len(b) > 0 && b[0] == JSON_QUOTE { + p.reader.ReadByte() + } +} + +func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { + isNull, err := p.readIfNull() + if isNull || err != nil { + return NUMERIC_NULL, err + } + hasDecimalPoint := false + nextCanBeSign := true + hasE := false + MAX_LEN := 40 + buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) + continueFor := true + inQuotes := false + for continueFor { + c, err := p.reader.ReadByte() + if err != nil { + if err == io.EOF { + break + } + return NUMERIC_NULL, NewTProtocolException(err) + } + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + buf.WriteByte(c) + nextCanBeSign = false + case '.': + if hasDecimalPoint { + e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if hasE { + e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + hasDecimalPoint, nextCanBeSign = true, false + case 'e', 'E': + if hasE { + e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + hasE, nextCanBeSign = true, true + case '-', '+': + if !nextCanBeSign { + e := fmt.Errorf("Negative sign within number") + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + nextCanBeSign = false + case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: + p.reader.UnreadByte() + continueFor = false + case JSON_NAN[0]: + if buf.Len() == 0 { + buffer := make([]byte, len(JSON_NAN)) + buffer[0] = c + _, e := p.reader.Read(buffer[1:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_NAN != string(buffer) { + e := mismatch(JSON_NAN, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return NAN, nil + } else { + e := fmt.Errorf("Unable to parse number starting with character '%c'", c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_INFINITY[0]: + if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { + buffer := make([]byte, len(JSON_INFINITY)) + buffer[0] = c + _, e := p.reader.Read(buffer[1:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_INFINITY != string(buffer) { + e := mismatch(JSON_INFINITY, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return INFINITY, nil + } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { + buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) + buffer[0] = JSON_NEGATIVE_INFINITY[0] + buffer[1] = c + _, e := p.reader.Read(buffer[2:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_NEGATIVE_INFINITY != string(buffer) { + e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) + return 
NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return NEGATIVE_INFINITY, nil + } else { + e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_QUOTE: + if !inQuotes { + inQuotes = true + } else { + break + } + default: + e := fmt.Errorf("Unable to parse number starting with character '%c'", c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + if buf.Len() == 0 { + e := fmt.Errorf("Unable to parse number from empty string ''") + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return NewNumericFromJSONString(buf.String(), false), nil +} + +// Safely peeks into the buffer, reading only what is necessary +func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { + for i := 0; i < len(b); i++ { + a, _ := p.reader.Peek(i + 1) + if len(a) == 0 || a[i] != b[i] { + return false + } + } + return true +} + +// Reset the context stack to its initial state. +func (p *TSimpleJSONProtocol) resetContextStack() { + p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)} + p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)} +} + +func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { + n, err := p.writer.Write(b) + if err != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + } + return n, err +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go new file mode 100644 index 000000000000..453899651fc2 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" +) + +var errTransportInterrupted = errors.New("Transport Interrupted") + +type Flusher interface { + Flush() (err error) +} + +type ReadSizeProvider interface { + RemainingBytes() (num_bytes uint64) +} + + +// Encapsulates the I/O layer +type TTransport interface { + io.ReadWriteCloser + Flusher + ReadSizeProvider + + // Opens the transport for communication + Open() error + + // Returns true if the transport is open + IsOpen() bool +} + +type stringWriter interface { + WriteString(s string) (n int, err error) +} + + +// This is "enchanced" transport with extra capabilities. You need to use one of these +// to construct protocol. +// Notably, TSocket does not implement this interface, and it is always a mistake to use +// TSocket directly in protocol. 
+type TRichTransport interface { + io.ReadWriter + io.ByteReader + io.ByteWriter + stringWriter + Flusher + ReadSizeProvider +} + diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go new file mode 100644 index 000000000000..9505b44612d0 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" +) + +type timeoutable interface { + Timeout() bool +} + +// Thrift Transport exception +type TTransportException interface { + TException + TypeId() int + Err() error +} + +const ( + UNKNOWN_TRANSPORT_EXCEPTION = 0 + NOT_OPEN = 1 + ALREADY_OPEN = 2 + TIMED_OUT = 3 + END_OF_FILE = 4 +) + +type tTransportException struct { + typeId int + err error +} + +func (p *tTransportException) TypeId() int { + return p.typeId +} + +func (p *tTransportException) Error() string { + return p.err.Error() +} + +func (p *tTransportException) Err() error { + return p.err +} + +func NewTTransportException(t int, e string) TTransportException { + return &tTransportException{typeId: t, err: errors.New(e)} +} + +func NewTTransportExceptionFromError(e error) TTransportException { + if e == nil { + return nil + } + + if t, ok := e.(TTransportException); ok { + return t + } + + switch v := e.(type) { + case TTransportException: + return v + case timeoutable: + if v.Timeout() { + return &tTransportException{typeId: TIMED_OUT, err: e} + } + } + + if e == io.EOF { + return &tTransportException{typeId: END_OF_FILE, err: e} + } + + return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e} +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go new file mode 100644 index 000000000000..533d1b437533 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Factory class used to create wrapped instance of Transports. +// This is used primarily in servers, which get Transports from +// a ServerTransport and then may want to mutate them (i.e. create +// a BufferedTransport from the underlying base transport) +type TTransportFactory interface { + GetTransport(trans TTransport) TTransport +} + +type tTransportFactory struct{} + +// Return a wrapped instance of the base Transport. +func (p *tTransportFactory) GetTransport(trans TTransport) TTransport { + return trans +} + +func NewTTransportFactory() TTransportFactory { + return &tTransportFactory{} +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/type.go b/vendor/github.com/uber/jaeger-client-go/thrift/type.go new file mode 100644 index 000000000000..4292ffcadb13 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/thrift/type.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Type constants in the Thrift protocol +type TType byte + +const ( + STOP = 0 + VOID = 1 + BOOL = 2 + BYTE = 3 + I08 = 3 + DOUBLE = 4 + I16 = 6 + I32 = 8 + I64 = 10 + STRING = 11 + UTF7 = 11 + STRUCT = 12 + MAP = 13 + SET = 14 + LIST = 15 + UTF8 = 16 + UTF16 = 17 + //BINARY = 18 wrong and unusued +) + +var typeNames = map[int]string{ + STOP: "STOP", + VOID: "VOID", + BOOL: "BOOL", + BYTE: "BYTE", + DOUBLE: "DOUBLE", + I16: "I16", + I32: "I32", + I64: "I64", + STRING: "STRING", + STRUCT: "STRUCT", + MAP: "MAP", + SET: "SET", + LIST: "LIST", + UTF8: "UTF8", + UTF16: "UTF16", +} + +func (p TType) String() string { + if s, ok := typeNames[int(p)]; ok { + return s + } + return "Unknown" +} diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go new file mode 100644 index 000000000000..198c32eb4fde --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/tracer.go @@ -0,0 +1,431 @@ +// Copyright (c) 2017-2018 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "fmt" + "io" + "os" + "reflect" + "strconv" + "sync" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + + "github.com/uber/jaeger-client-go/internal/baggage" + "github.com/uber/jaeger-client-go/internal/throttler" + "github.com/uber/jaeger-client-go/log" + "github.com/uber/jaeger-client-go/utils" +) + +// Tracer implements opentracing.Tracer. +type Tracer struct { + serviceName string + hostIPv4 uint32 // this is for zipkin endpoint conversion + + sampler Sampler + reporter Reporter + metrics Metrics + logger log.Logger + + timeNow func() time.Time + randomNumber func() uint64 + + options struct { + poolSpans bool + gen128Bit bool // whether to generate 128bit trace IDs + zipkinSharedRPCSpan bool + highTraceIDGenerator func() uint64 // custom high trace ID generator + maxTagValueLength int + // more options to come + } + // pool for Span objects + spanPool sync.Pool + + injectors map[interface{}]Injector + extractors map[interface{}]Extractor + + observer compositeObserver + + tags []Tag + process Process + + baggageRestrictionManager baggage.RestrictionManager + baggageSetter *baggageSetter + + debugThrottler throttler.Throttler +} + +// NewTracer creates Tracer implementation that reports tracing to Jaeger. +// The returned io.Closer can be used in shutdown hooks to ensure that the internal +// queue of the Reporter is drained and all buffered spans are submitted to collectors. 
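+//
+// A minimal usage sketch, assuming the ConstSampler and NullReporter helpers
+// defined elsewhere in this package:
+//
+//	tracer, closer := NewTracer("my-service", NewConstSampler(true), NewNullReporter())
+//	defer closer.Close()
+//	opentracing.SetGlobalTracer(tracer)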
+func NewTracer( + serviceName string, + sampler Sampler, + reporter Reporter, + options ...TracerOption, +) (opentracing.Tracer, io.Closer) { + t := &Tracer{ + serviceName: serviceName, + sampler: sampler, + reporter: reporter, + injectors: make(map[interface{}]Injector), + extractors: make(map[interface{}]Extractor), + metrics: *NewNullMetrics(), + spanPool: sync.Pool{New: func() interface{} { + return &Span{} + }}, + } + + for _, option := range options { + option(t) + } + + // register default injectors/extractors unless they are already provided via options + textPropagator := newTextMapPropagator(getDefaultHeadersConfig(), t.metrics) + t.addCodec(opentracing.TextMap, textPropagator, textPropagator) + + httpHeaderPropagator := newHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics) + t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator) + + binaryPropagator := newBinaryPropagator(t) + t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator) + + // TODO remove after TChannel supports OpenTracing + interopPropagator := &jaegerTraceContextPropagator{tracer: t} + t.addCodec(SpanContextFormat, interopPropagator, interopPropagator) + + zipkinPropagator := &zipkinPropagator{tracer: t} + t.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator) + + if t.baggageRestrictionManager != nil { + t.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics) + } else { + t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics) + } + if t.debugThrottler == nil { + t.debugThrottler = throttler.DefaultThrottler{} + } + + if t.randomNumber == nil { + rng := utils.NewRand(time.Now().UnixNano()) + t.randomNumber = func() uint64 { + return uint64(rng.Int63()) + } + } + if t.timeNow == nil { + t.timeNow = time.Now + } + if t.logger == nil { + t.logger = log.NullLogger + } + // Set tracer-level tags + t.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion}) + if hostname, err := os.Hostname(); err == nil { + t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname}) + } + if ip, err := utils.HostIP(); err == nil { + t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()}) + t.hostIPv4 = utils.PackIPAsUint32(ip) + } else { + t.logger.Error("Unable to determine this host's IP address: " + err.Error()) + } + + if t.options.gen128Bit { + if t.options.highTraceIDGenerator == nil { + t.options.highTraceIDGenerator = t.randomNumber + } + } else if t.options.highTraceIDGenerator != nil { + t.logger.Error("Overriding high trace ID generator but not generating " + + "128 bit trace IDs, consider enabling the \"Gen128Bit\" option") + } + if t.options.maxTagValueLength == 0 { + t.options.maxTagValueLength = DefaultMaxTagValueLength + } + t.process = Process{ + Service: serviceName, + UUID: strconv.FormatUint(t.randomNumber(), 16), + Tags: t.tags, + } + if throttler, ok := t.debugThrottler.(ProcessSetter); ok { + throttler.SetProcess(t.process) + } + + return t, t +} + +// addCodec adds registers injector and extractor for given propagation format if not already defined. +func (t *Tracer) addCodec(format interface{}, injector Injector, extractor Extractor) { + if _, ok := t.injectors[format]; !ok { + t.injectors[format] = injector + } + if _, ok := t.extractors[format]; !ok { + t.extractors[format] = extractor + } +} + +// StartSpan implements StartSpan() method of opentracing.Tracer. 
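+// The options are applied in order; if none of the supplied references carries
+// a usable parent context, a new trace ID is generated and the configured
+// sampler decides whether the span will be reported.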
+func (t *Tracer) StartSpan( + operationName string, + options ...opentracing.StartSpanOption, +) opentracing.Span { + sso := opentracing.StartSpanOptions{} + for _, o := range options { + o.Apply(&sso) + } + return t.startSpanWithOptions(operationName, sso) +} + +func (t *Tracer) startSpanWithOptions( + operationName string, + options opentracing.StartSpanOptions, +) opentracing.Span { + if options.StartTime.IsZero() { + options.StartTime = t.timeNow() + } + + // Predicate whether the given span context is a valid reference + // which may be used as parent / debug ID / baggage items source + isValidReference := func(ctx SpanContext) bool { + return ctx.IsValid() || ctx.isDebugIDContainerOnly() || len(ctx.baggage) != 0 + } + + var references []Reference + var parent SpanContext + var hasParent bool // need this because `parent` is a value, not reference + for _, ref := range options.References { + ctx, ok := ref.ReferencedContext.(SpanContext) + if !ok { + t.logger.Error(fmt.Sprintf( + "Reference contains invalid type of SpanReference: %s", + reflect.ValueOf(ref.ReferencedContext))) + continue + } + if !isValidReference(ctx) { + continue + } + references = append(references, Reference{Type: ref.Type, Context: ctx}) + if !hasParent { + parent = ctx + hasParent = ref.Type == opentracing.ChildOfRef + } + } + if !hasParent && isValidReference(parent) { + // If ChildOfRef wasn't found but a FollowFromRef exists, use the context from + // the FollowFromRef as the parent + hasParent = true + } + + rpcServer := false + if v, ok := options.Tags[ext.SpanKindRPCServer.Key]; ok { + rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum)) + } + + var samplerTags []Tag + var ctx SpanContext + newTrace := false + if !hasParent || !parent.IsValid() { + newTrace = true + ctx.traceID.Low = t.randomID() + if t.options.gen128Bit { + ctx.traceID.High = t.options.highTraceIDGenerator() + } + ctx.spanID = SpanID(ctx.traceID.Low) + ctx.parentID = 0 + ctx.flags = byte(0) + if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) { + ctx.flags |= (flagSampled | flagDebug) + samplerTags = []Tag{{key: JaegerDebugHeader, value: parent.debugID}} + } else if sampled, tags := t.sampler.IsSampled(ctx.traceID, operationName); sampled { + ctx.flags |= flagSampled + samplerTags = tags + } + } else { + ctx.traceID = parent.traceID + if rpcServer && t.options.zipkinSharedRPCSpan { + // Support Zipkin's one-span-per-RPC model + ctx.spanID = parent.spanID + ctx.parentID = parent.parentID + } else { + ctx.spanID = SpanID(t.randomID()) + ctx.parentID = parent.spanID + } + ctx.flags = parent.flags + } + if hasParent { + // copy baggage items + if l := len(parent.baggage); l > 0 { + ctx.baggage = make(map[string]string, len(parent.baggage)) + for k, v := range parent.baggage { + ctx.baggage[k] = v + } + } + } + + sp := t.newSpan() + sp.context = ctx + sp.observer = t.observer.OnStartSpan(sp, operationName, options) + return t.startSpanInternal( + sp, + operationName, + options.StartTime, + samplerTags, + options.Tags, + newTrace, + rpcServer, + references, + ) +} + +// Inject implements Inject() method of opentracing.Tracer +func (t *Tracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error { + c, ok := ctx.(SpanContext) + if !ok { + return opentracing.ErrInvalidSpanContext + } + if injector, ok := t.injectors[format]; ok { + return injector.Inject(c, carrier) + } + return opentracing.ErrUnsupportedFormat +} + +// Extract implements Extract() 
method of opentracing.Tracer +func (t *Tracer) Extract( + format interface{}, + carrier interface{}, +) (opentracing.SpanContext, error) { + if extractor, ok := t.extractors[format]; ok { + return extractor.Extract(carrier) + } + return nil, opentracing.ErrUnsupportedFormat +} + +// Close releases all resources used by the Tracer and flushes any remaining buffered spans. +func (t *Tracer) Close() error { + t.reporter.Close() + t.sampler.Close() + if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok { + mgr.Close() + } + if throttler, ok := t.debugThrottler.(io.Closer); ok { + throttler.Close() + } + return nil +} + +// Tags returns a slice of tracer-level tags. +func (t *Tracer) Tags() []opentracing.Tag { + tags := make([]opentracing.Tag, len(t.tags)) + for i, tag := range t.tags { + tags[i] = opentracing.Tag{Key: tag.key, Value: tag.value} + } + return tags +} + +// newSpan returns an instance of a clean Span object. +// If options.PoolSpans is true, the spans are retrieved from an object pool. +func (t *Tracer) newSpan() *Span { + if !t.options.poolSpans { + return &Span{} + } + sp := t.spanPool.Get().(*Span) + sp.context = emptyContext + sp.tracer = nil + sp.tags = nil + sp.logs = nil + return sp +} + +func (t *Tracer) startSpanInternal( + sp *Span, + operationName string, + startTime time.Time, + internalTags []Tag, + tags opentracing.Tags, + newTrace bool, + rpcServer bool, + references []Reference, +) *Span { + sp.tracer = t + sp.operationName = operationName + sp.startTime = startTime + sp.duration = 0 + sp.references = references + sp.firstInProcess = rpcServer || sp.context.parentID == 0 + if len(tags) > 0 || len(internalTags) > 0 { + sp.tags = make([]Tag, len(internalTags), len(tags)+len(internalTags)) + copy(sp.tags, internalTags) + for k, v := range tags { + sp.observer.OnSetTag(k, v) + if k == string(ext.SamplingPriority) && !setSamplingPriority(sp, v) { + continue + } + sp.setTagNoLocking(k, v) + } + } + // emit metrics + if sp.context.IsSampled() { + t.metrics.SpansStartedSampled.Inc(1) + if newTrace { + // We cannot simply check for parentID==0 because in Zipkin model the + // server-side RPC span has the exact same trace/span/parent IDs as the + // calling client-side span, but obviously the server side span is + // no longer a root span of the trace. + t.metrics.TracesStartedSampled.Inc(1) + } else if sp.firstInProcess { + t.metrics.TracesJoinedSampled.Inc(1) + } + } else { + t.metrics.SpansStartedNotSampled.Inc(1) + if newTrace { + t.metrics.TracesStartedNotSampled.Inc(1) + } else if sp.firstInProcess { + t.metrics.TracesJoinedNotSampled.Inc(1) + } + } + return sp +} + +func (t *Tracer) reportSpan(sp *Span) { + t.metrics.SpansFinished.Inc(1) + if sp.context.IsSampled() { + t.reporter.Report(sp) + } + if t.options.poolSpans { + t.spanPool.Put(sp) + } +} + +// randomID generates a random trace/span ID, using tracer.random() generator. +// It never returns 0. 
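A usage sketch for the span-creation path above: these opentracing-level calls exercise the ChildOf reference handling and the rpcServer branch in startSpanWithOptions. The handler name and tag values are illustrative; only the opentracing and ext identifiers come from the libraries themselves.

package example

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

// handleRequest is a hypothetical server-side handler; parentCtx is a span
// context previously extracted from the incoming request.
func handleRequest(tracer opentracing.Tracer, parentCtx opentracing.SpanContext) {
	span := tracer.StartSpan(
		"handle-request",
		opentracing.ChildOf(parentCtx), // recorded as the parent reference
		ext.SpanKindRPCServer,          // takes the rpcServer branch above
	)
	defer span.Finish()
	span.SetTag("result", "ok")
}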
+func (t *Tracer) randomID() uint64 { + val := t.randomNumber() + for val == 0 { + val = t.randomNumber() + } + return val +} + +// (NB) span must hold the lock before making this call +func (t *Tracer) setBaggage(sp *Span, key, value string) { + t.baggageSetter.setBaggage(sp, key, value) +} + +// (NB) span must hold the lock before making this call +func (t *Tracer) isDebugAllowed(operation string) bool { + return t.debugThrottler.IsAllowed(operation) +} diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go new file mode 100644 index 000000000000..a90265f031cf --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go @@ -0,0 +1,159 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "time" + + "github.com/opentracing/opentracing-go" + + "github.com/uber/jaeger-client-go/internal/baggage" + "github.com/uber/jaeger-client-go/internal/throttler" +) + +// TracerOption is a function that sets some option on the tracer +type TracerOption func(tracer *Tracer) + +// TracerOptions is a factory for all available TracerOption's +var TracerOptions tracerOptions + +type tracerOptions struct{} + +// Metrics creates a TracerOption that initializes Metrics on the tracer, +// which is used to emit statistics. +func (tracerOptions) Metrics(m *Metrics) TracerOption { + return func(tracer *Tracer) { + tracer.metrics = *m + } +} + +// Logger creates a TracerOption that gives the tracer a Logger. +func (tracerOptions) Logger(logger Logger) TracerOption { + return func(tracer *Tracer) { + tracer.logger = logger + } +} + +func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption { + return func(tracer *Tracer) { + if headerKeys == nil { + return + } + textPropagator := newTextMapPropagator(headerKeys.applyDefaults(), tracer.metrics) + tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator) + + httpHeaderPropagator := newHTTPHeaderPropagator(headerKeys.applyDefaults(), tracer.metrics) + tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator) + } +} + +// TimeNow creates a TracerOption that gives the tracer a function +// used to generate timestamps for spans. +func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption { + return func(tracer *Tracer) { + tracer.timeNow = timeNow + } +} + +// RandomNumber creates a TracerOption that gives the tracer +// a thread-safe random number generator function for generating trace IDs. +func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption { + return func(tracer *Tracer) { + tracer.randomNumber = randomNumber + } +} + +// PoolSpans creates a TracerOption that tells the tracer whether it should use +// an object pool to minimize span allocations. 
+// This should be used with care, only if the service is not running any async tasks +// that can access parent spans after those spans have been finished. +func (tracerOptions) PoolSpans(poolSpans bool) TracerOption { + return func(tracer *Tracer) { + tracer.options.poolSpans = poolSpans + } +} + +// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process. +// If not set, the factory method will obtain the current IP address. +// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP. +func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption { + return func(tracer *Tracer) { + tracer.hostIPv4 = hostIPv4 + } +} + +func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption { + return func(tracer *Tracer) { + tracer.injectors[format] = injector + } +} + +func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption { + return func(tracer *Tracer) { + tracer.extractors[format] = extractor + } +} + +func (t tracerOptions) Observer(observer Observer) TracerOption { + return t.ContribObserver(&oldObserver{obs: observer}) +} + +func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption { + return func(tracer *Tracer) { + tracer.observer.append(observer) + } +} + +func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption { + return func(tracer *Tracer) { + tracer.options.gen128Bit = gen128Bit + } +} + +func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption { + return func(tracer *Tracer) { + tracer.options.highTraceIDGenerator = highTraceIDGenerator + } +} + +func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption { + return func(tracer *Tracer) { + tracer.options.maxTagValueLength = maxTagValueLength + } +} + +func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption { + return func(tracer *Tracer) { + tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan + } +} + +func (tracerOptions) Tag(key string, value interface{}) TracerOption { + return func(tracer *Tracer) { + tracer.tags = append(tracer.tags, Tag{key: key, value: value}) + } +} + +func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption { + return func(tracer *Tracer) { + tracer.baggageRestrictionManager = mgr + } +} + +func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption { + return func(tracer *Tracer) { + tracer.debugThrottler = throttler + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go new file mode 100644 index 000000000000..c5f5b19551fe --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/transport.go @@ -0,0 +1,38 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "io" +) + +// Transport abstracts the method of sending spans out of process. 
+// Implementations are NOT required to be thread-safe; the RemoteReporter +// is expected to only call methods on the Transport from the same go-routine. +type Transport interface { + // Append converts the span to the wire representation and adds it + // to sender's internal buffer. If the buffer exceeds its designated + // size, the transport should call Flush() and return the number of spans + // flushed, otherwise return 0. If error is returned, the returned number + // of spans is treated as failed span, and reported to metrics accordingly. + Append(span *Span) (int, error) + + // Flush submits the internal buffer to the remote server. It returns the + // number of spans flushed. If error is returned, the returned number of + // spans is treated as failed span, and reported to metrics accordingly. + Flush() (int, error) + + io.Closer +} diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go new file mode 100644 index 000000000000..7b9ccf937449 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go @@ -0,0 +1,131 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "errors" + "fmt" + + "github.com/uber/jaeger-client-go/thrift" + + j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" + "github.com/uber/jaeger-client-go/utils" +) + +// Empirically obtained constant for how many bytes in the message are used for envelope. +// The total datagram size is: +// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize +// There is a unit test `TestEmitBatchOverhead` that validates this number. +// Note that due to the use of Compact Thrift protocol, overhead grows with the number of spans +// in the batch, because the length of the list is encoded as varint32, as well as SeqId. +const emitBatchOverhead = 30 + +var errSpanTooLarge = errors.New("Span is too large") + +type udpSender struct { + client *utils.AgentClientUDP + maxPacketSize int // max size of datagram in bytes + maxSpanBytes int // max number of bytes to record spans (excluding envelope) in the datagram + byteBufferSize int // current number of span bytes accumulated in the buffer + spanBuffer []*j.Span // spans buffered before a flush + thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span + thriftProtocol thrift.TProtocol + process *j.Process + processByteSize int +} + +// NewUDPTransport creates a reporter that submits spans to jaeger-agent +func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) { + if len(hostPort) == 0 { + hostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort) + } + if maxPacketSize == 0 { + maxPacketSize = utils.UDPPacketMaxLength + } + + protocolFactory := thrift.NewTCompactProtocolFactory() + + // Each span is first written to thriftBuffer to determine its size in bytes. 
+ thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) + thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) + + client, err := utils.NewAgentClientUDP(hostPort, maxPacketSize) + if err != nil { + return nil, err + } + + sender := &udpSender{ + client: client, + maxSpanBytes: maxPacketSize - emitBatchOverhead, + thriftBuffer: thriftBuffer, + thriftProtocol: thriftProtocol} + return sender, nil +} + +func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int { + s.thriftBuffer.Reset() + thriftStruct.Write(s.thriftProtocol) + return s.thriftBuffer.Len() +} + +func (s *udpSender) Append(span *Span) (int, error) { + if s.process == nil { + s.process = BuildJaegerProcessThrift(span) + s.processByteSize = s.calcSizeOfSerializedThrift(s.process) + s.byteBufferSize += s.processByteSize + } + jSpan := BuildJaegerThrift(span) + spanSize := s.calcSizeOfSerializedThrift(jSpan) + if spanSize > s.maxSpanBytes { + return 1, errSpanTooLarge + } + + s.byteBufferSize += spanSize + if s.byteBufferSize <= s.maxSpanBytes { + s.spanBuffer = append(s.spanBuffer, jSpan) + if s.byteBufferSize < s.maxSpanBytes { + return 0, nil + } + return s.Flush() + } + // the latest span did not fit in the buffer + n, err := s.Flush() + s.spanBuffer = append(s.spanBuffer, jSpan) + s.byteBufferSize = spanSize + s.processByteSize + return n, err +} + +func (s *udpSender) Flush() (int, error) { + n := len(s.spanBuffer) + if n == 0 { + return 0, nil + } + err := s.client.EmitBatch(&j.Batch{Process: s.process, Spans: s.spanBuffer}) + s.resetBuffers() + + return n, err +} + +func (s *udpSender) Close() error { + return s.client.Close() +} + +func (s *udpSender) resetBuffers() { + for i := range s.spanBuffer { + s.spanBuffer[i] = nil + } + s.spanBuffer = s.spanBuffer[:0] + s.byteBufferSize = s.processByteSize +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go new file mode 100644 index 000000000000..237211f8224b --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go @@ -0,0 +1,54 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" +) + +// GetJSON makes an HTTP call to the specified URL and parses the returned JSON into `out`. 
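To make the helper below concrete, a small sketch of calling it against a hypothetical endpoint; the URL and the result struct are illustrative placeholders.

package example

import "github.com/uber/jaeger-client-go/utils"

// samplingResponse is a hypothetical shape for the JSON we expect back.
type samplingResponse struct {
	StrategyType string  `json:"strategyType"`
	Rate         float64 `json:"rate"`
}

func fetchStrategy() (*samplingResponse, error) {
	var out samplingResponse
	// The URL is a placeholder; GetJSON returns an error for HTTP status >= 400.
	if err := utils.GetJSON("http://localhost:5778/sampling?service=example", &out); err != nil {
		return nil, err
	}
	return &out, nil
}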
+func GetJSON(url string, out interface{}) error { + resp, err := http.Get(url) + if err != nil { + return err + } + return ReadJSON(resp, out) +} + +// ReadJSON reads JSON from http.Response and parses it into `out` +func ReadJSON(resp *http.Response, out interface{}) error { + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body) + } + + if out == nil { + io.Copy(ioutil.Discard, resp.Body) + return nil + } + + decoder := json.NewDecoder(resp.Body) + return decoder.Decode(out) +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/localip.go b/vendor/github.com/uber/jaeger-client-go/utils/localip.go new file mode 100644 index 000000000000..b51af7713f7c --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/localip.go @@ -0,0 +1,84 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "errors" + "net" +) + +// This code is borrowed from https://github.com/uber/tchannel-go/blob/dev/localip.go + +// scoreAddr scores how likely the given addr is to be a remote address and returns the +// IP to use when listening. Any address which receives a negative score should not be used. +// Scores are calculated as: +// -1 for any unknown IP addresses. +// +300 for IPv4 addresses +// +100 for non-local addresses, extra +100 for "up" interaces. +func scoreAddr(iface net.Interface, addr net.Addr) (int, net.IP) { + var ip net.IP + if netAddr, ok := addr.(*net.IPNet); ok { + ip = netAddr.IP + } else if netIP, ok := addr.(*net.IPAddr); ok { + ip = netIP.IP + } else { + return -1, nil + } + + var score int + if ip.To4() != nil { + score += 300 + } + if iface.Flags&net.FlagLoopback == 0 && !ip.IsLoopback() { + score += 100 + if iface.Flags&net.FlagUp != 0 { + score += 100 + } + } + return score, ip +} + +// HostIP tries to find an IP that can be used by other machines to reach this machine. +func HostIP() (net.IP, error) { + interfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + + bestScore := -1 + var bestIP net.IP + // Select the highest scoring IP as the best IP. + for _, iface := range interfaces { + addrs, err := iface.Addrs() + if err != nil { + // Skip this interface if there is an error. + continue + } + + for _, addr := range addrs { + score, ip := scoreAddr(iface, addr) + if score > bestScore { + bestScore = score + bestIP = ip + } + } + } + + if bestScore == -1 { + return nil, errors.New("no addresses to listen on") + } + + return bestIP, nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rand.go b/vendor/github.com/uber/jaeger-client-go/utils/rand.go new file mode 100644 index 000000000000..9875f7f55cbd --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/rand.go @@ -0,0 +1,46 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "math/rand" + "sync" +) + +// lockedSource allows a random number generator to be used by multiple goroutines concurrently. +// The code is very similar to math/rand.lockedSource, which is unfortunately not exposed. +type lockedSource struct { + mut sync.Mutex + src rand.Source +} + +// NewRand returns a rand.Rand that is threadsafe. +func NewRand(seed int64) *rand.Rand { + return rand.New(&lockedSource{src: rand.NewSource(seed)}) +} + +func (r *lockedSource) Int63() (n int64) { + r.mut.Lock() + n = r.src.Int63() + r.mut.Unlock() + return +} + +// Seed implements Seed() of Source +func (r *lockedSource) Seed(seed int64) { + r.mut.Lock() + r.src.Seed(seed) + r.mut.Unlock() +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go new file mode 100644 index 000000000000..1b8db9758486 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go @@ -0,0 +1,77 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "sync" + "time" +) + +// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits. +type RateLimiter interface { + CheckCredit(itemCost float64) bool +} + +type rateLimiter struct { + sync.Mutex + + creditsPerSecond float64 + balance float64 + maxBalance float64 + lastTick time.Time + + timeNow func() time.Time +} + +// NewRateLimiter creates a new rate limiter based on leaky bucket algorithm, formulated in terms of a +// credits balance that is replenished every time CheckCredit() method is called (tick) by the amount proportional +// to the time elapsed since the last tick, up to max of creditsPerSecond. A call to CheckCredit() takes a cost +// of an item we want to pay with the balance. If the balance exceeds the cost of the item, the item is "purchased" +// and the balance reduced, indicated by returned value of true. Otherwise the balance is unchanged and return false. +// +// This can be used to limit a rate of messages emitted by a service by instantiating the Rate Limiter with the +// max number of messages a service is allowed to emit per second, and calling CheckCredit(1.0) for each message +// to determine if the message is within the rate limit. 
+// +// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput +// as bytes/second, and calling CheckCredit() with the actual message size. +func NewRateLimiter(creditsPerSecond, maxBalance float64) RateLimiter { + return &rateLimiter{ + creditsPerSecond: creditsPerSecond, + balance: maxBalance, + maxBalance: maxBalance, + lastTick: time.Now(), + timeNow: time.Now} +} + +func (b *rateLimiter) CheckCredit(itemCost float64) bool { + b.Lock() + defer b.Unlock() + // calculate how much time passed since the last tick, and update current tick + currentTime := b.timeNow() + elapsedTime := currentTime.Sub(b.lastTick) + b.lastTick = currentTime + // calculate how much credit have we accumulated since the last tick + b.balance += elapsedTime.Seconds() * b.creditsPerSecond + if b.balance > b.maxBalance { + b.balance = b.maxBalance + } + // if we have enough credits to pay for current item, then reduce balance and allow + if b.balance >= itemCost { + b.balance -= itemCost + return true + } + return false +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go new file mode 100644 index 000000000000..6f042073d631 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go @@ -0,0 +1,98 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "errors" + "fmt" + "io" + "net" + + "github.com/uber/jaeger-client-go/thrift" + + "github.com/uber/jaeger-client-go/thrift-gen/agent" + "github.com/uber/jaeger-client-go/thrift-gen/jaeger" + "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" +) + +// UDPPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent +const UDPPacketMaxLength = 65000 + +// AgentClientUDP is a UDP client to Jaeger agent that implements agent.Agent interface. +type AgentClientUDP struct { + agent.Agent + io.Closer + + connUDP *net.UDPConn + client *agent.AgentClient + maxPacketSize int // max size of datagram in bytes + thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span +} + +// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. 
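An illustrative sketch of driving the UDP agent client below directly (within this package it is normally wrapped by the udpSender from transport_udp.go). The host:port value and the pre-built batch are placeholders.

package example

import (
	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
	"github.com/uber/jaeger-client-go/utils"
)

// emitOnce sends a single pre-built Thrift batch to a local agent.
func emitOnce(batch *j.Batch) error {
	// "localhost:6831" is illustrative; 0 keeps the default UDPPacketMaxLength.
	client, err := utils.NewAgentClientUDP("localhost:6831", 0)
	if err != nil {
		return err
	}
	defer client.Close()
	return client.EmitBatch(batch)
}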
+func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) { + if maxPacketSize == 0 { + maxPacketSize = UDPPacketMaxLength + } + + thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) + protocolFactory := thrift.NewTCompactProtocolFactory() + client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory) + + destAddr, err := net.ResolveUDPAddr("udp", hostPort) + if err != nil { + return nil, err + } + + connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr) + if err != nil { + return nil, err + } + if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil { + return nil, err + } + + clientUDP := &AgentClientUDP{ + connUDP: connUDP, + client: client, + maxPacketSize: maxPacketSize, + thriftBuffer: thriftBuffer} + return clientUDP, nil +} + +// EmitZipkinBatch implements EmitZipkinBatch() of Agent interface +func (a *AgentClientUDP) EmitZipkinBatch(spans []*zipkincore.Span) error { + return errors.New("Not implemented") +} + +// EmitBatch implements EmitBatch() of Agent interface +func (a *AgentClientUDP) EmitBatch(batch *jaeger.Batch) error { + a.thriftBuffer.Reset() + a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages + if err := a.client.EmitBatch(batch); err != nil { + return err + } + if a.thriftBuffer.Len() > a.maxPacketSize { + return fmt.Errorf("Data does not fit within one UDP packet; size %d, max %d, spans %d", + a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans)) + } + _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) + return err +} + +// Close implements Close() of io.Closer and closes the underlying UDP connection. +func (a *AgentClientUDP) Close() error { + return a.connUDP.Close() +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils.go b/vendor/github.com/uber/jaeger-client-go/utils/utils.go new file mode 100644 index 000000000000..ac3c325d1ede --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/utils.go @@ -0,0 +1,87 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "encoding/binary" + "errors" + "net" + "strconv" + "strings" + "time" +) + +var ( + // ErrEmptyIP an error for empty ip strings + ErrEmptyIP = errors.New("empty string given for ip") + + // ErrNotHostColonPort an error for invalid host port string + ErrNotHostColonPort = errors.New("expecting host:port") + + // ErrNotFourOctets an error for the wrong number of octets after splitting a string + ErrNotFourOctets = errors.New("Wrong number of octets") +) + +// ParseIPToUint32 converts a string ip (e.g. 
"x.y.z.w") to an uint32 +func ParseIPToUint32(ip string) (uint32, error) { + if ip == "" { + return 0, ErrEmptyIP + } + + if ip == "localhost" { + return 127<<24 | 1, nil + } + + octets := strings.Split(ip, ".") + if len(octets) != 4 { + return 0, ErrNotFourOctets + } + + var intIP uint32 + for i := 0; i < 4; i++ { + octet, err := strconv.Atoi(octets[i]) + if err != nil { + return 0, err + } + intIP = (intIP << 8) | uint32(octet) + } + + return intIP, nil +} + +// ParsePort converts port number from string to uin16 +func ParsePort(portString string) (uint16, error) { + port, err := strconv.ParseUint(portString, 10, 16) + return uint16(port), err +} + +// PackIPAsUint32 packs an IPv4 as uint32 +func PackIPAsUint32(ip net.IP) uint32 { + if ipv4 := ip.To4(); ipv4 != nil { + return binary.BigEndian.Uint32(ipv4) + } + return 0 +} + +// TimeToMicrosecondsSinceEpochInt64 converts Go time.Time to a long +// representing time since epoch in microseconds, which is used expected +// in the Jaeger spans encoded as Thrift. +func TimeToMicrosecondsSinceEpochInt64(t time.Time) int64 { + // ^^^ Passing time.Time by value is faster than passing a pointer! + // BenchmarkTimeByValue-8 2000000000 1.37 ns/op + // BenchmarkTimeByPtr-8 2000000000 1.98 ns/op + + return t.UnixNano() / 1000 +} diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go new file mode 100644 index 000000000000..636952b7f1d0 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go @@ -0,0 +1,76 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "github.com/opentracing/opentracing-go" +) + +// ZipkinSpanFormat is an OpenTracing carrier format constant +const ZipkinSpanFormat = "zipkin-span-format" + +// ExtractableZipkinSpan is a type of Carrier used for integration with Zipkin-aware +// RPC frameworks (like TChannel). It does not support baggage, only trace IDs. +type ExtractableZipkinSpan interface { + TraceID() uint64 + SpanID() uint64 + ParentID() uint64 + Flags() byte +} + +// InjectableZipkinSpan is a type of Carrier used for integration with Zipkin-aware +// RPC frameworks (like TChannel). It does not support baggage, only trace IDs. 
+type InjectableZipkinSpan interface { + SetTraceID(traceID uint64) + SetSpanID(spanID uint64) + SetParentID(parentID uint64) + SetFlags(flags byte) +} + +type zipkinPropagator struct { + tracer *Tracer +} + +func (p *zipkinPropagator) Inject( + ctx SpanContext, + abstractCarrier interface{}, +) error { + carrier, ok := abstractCarrier.(InjectableZipkinSpan) + if !ok { + return opentracing.ErrInvalidCarrier + } + + carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs + carrier.SetSpanID(uint64(ctx.SpanID())) + carrier.SetParentID(uint64(ctx.ParentID())) + carrier.SetFlags(ctx.flags) + return nil +} + +func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { + carrier, ok := abstractCarrier.(ExtractableZipkinSpan) + if !ok { + return emptyContext, opentracing.ErrInvalidCarrier + } + if carrier.TraceID() == 0 { + return emptyContext, opentracing.ErrSpanContextNotFound + } + var ctx SpanContext + ctx.traceID.Low = carrier.TraceID() + ctx.spanID = SpanID(carrier.SpanID()) + ctx.parentID = SpanID(carrier.ParentID()) + ctx.flags = carrier.Flags() + return ctx, nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go new file mode 100644 index 000000000000..dce58b4331c4 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go @@ -0,0 +1,322 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "encoding/binary" + "fmt" + "time" + + "github.com/opentracing/opentracing-go/ext" + + "github.com/uber/jaeger-client-go/internal/spanlog" + z "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" + "github.com/uber/jaeger-client-go/utils" +) + +const ( + // Zipkin UI does not work well with non-string tag values + allowPackedNumbers = false +) + +var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){ + string(ext.SpanKind): setSpanKind, + string(ext.PeerHostIPv4): setPeerIPv4, + string(ext.PeerPort): setPeerPort, + string(ext.PeerService): setPeerService, + TracerIPTagKey: removeTag, +} + +// BuildZipkinThrift builds thrift span based on internal span. 
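Before moving on to the Thrift conversion below, a minimal sketch of a carrier that satisfies both ExtractableZipkinSpan and InjectableZipkinSpan above and can be handed to Tracer.Inject / Tracer.Extract with ZipkinSpanFormat. The carrier struct and helper are hypothetical; the span is assumed to have been started by a tracer from this package.

package example

import (
	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// zipkinCarrier is a hypothetical in-memory carrier for the Zipkin propagator.
type zipkinCarrier struct {
	traceID, spanID, parentID uint64
	flags                     byte
}

func (c *zipkinCarrier) SetTraceID(id uint64)  { c.traceID = id }
func (c *zipkinCarrier) SetSpanID(id uint64)   { c.spanID = id }
func (c *zipkinCarrier) SetParentID(id uint64) { c.parentID = id }
func (c *zipkinCarrier) SetFlags(f byte)       { c.flags = f }
func (c *zipkinCarrier) TraceID() uint64       { return c.traceID }
func (c *zipkinCarrier) SpanID() uint64        { return c.spanID }
func (c *zipkinCarrier) ParentID() uint64      { return c.parentID }
func (c *zipkinCarrier) Flags() byte           { return c.flags }

// copySpanContext shows the round trip: inject the span's context into the
// carrier, then extract it again through the same tracer.
func copySpanContext(tracer opentracing.Tracer, span opentracing.Span) error {
	carrier := &zipkinCarrier{}
	if err := tracer.Inject(span.Context(), jaeger.ZipkinSpanFormat, carrier); err != nil {
		return err
	}
	_, err := tracer.Extract(jaeger.ZipkinSpanFormat, carrier)
	return err
}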
+func BuildZipkinThrift(s *Span) *z.Span { + span := &zipkinSpan{Span: s} + span.handleSpecialTags() + parentID := int64(span.context.parentID) + var ptrParentID *int64 + if parentID != 0 { + ptrParentID = &parentID + } + timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime) + duration := span.duration.Nanoseconds() / int64(time.Microsecond) + endpoint := &z.Endpoint{ + ServiceName: span.tracer.serviceName, + Ipv4: int32(span.tracer.hostIPv4)} + thriftSpan := &z.Span{ + TraceID: int64(span.context.traceID.Low), // TODO upgrade zipkin thrift and use TraceIdHigh + ID: int64(span.context.spanID), + ParentID: ptrParentID, + Name: span.operationName, + Timestamp: ×tamp, + Duration: &duration, + Debug: span.context.IsDebug(), + Annotations: buildAnnotations(span, endpoint), + BinaryAnnotations: buildBinaryAnnotations(span, endpoint)} + return thriftSpan +} + +func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation { + // automatically adding 2 Zipkin CoreAnnotations + annotations := make([]*z.Annotation, 0, 2+len(span.logs)) + var startLabel, endLabel string + if span.spanKind == string(ext.SpanKindRPCClientEnum) { + startLabel, endLabel = z.CLIENT_SEND, z.CLIENT_RECV + } else if span.spanKind == string(ext.SpanKindRPCServerEnum) { + startLabel, endLabel = z.SERVER_RECV, z.SERVER_SEND + } + if !span.startTime.IsZero() && startLabel != "" { + start := &z.Annotation{ + Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(span.startTime), + Value: startLabel, + Host: endpoint} + annotations = append(annotations, start) + if span.duration != 0 { + endTs := span.startTime.Add(span.duration) + end := &z.Annotation{ + Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(endTs), + Value: endLabel, + Host: endpoint} + annotations = append(annotations, end) + } + } + for _, log := range span.logs { + anno := &z.Annotation{ + Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp), + Host: endpoint} + if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil { + anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength) + } else { + anno.Value = err.Error() + } + annotations = append(annotations, anno) + } + return annotations +} + +func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryAnnotation { + // automatically adding local component or server/client address tag, and client version + annotations := make([]*z.BinaryAnnotation, 0, 2+len(span.tags)) + + if span.peerDefined() && span.isRPC() { + peer := z.Endpoint{ + Ipv4: span.peer.Ipv4, + Port: span.peer.Port, + ServiceName: span.peer.ServiceName} + label := z.CLIENT_ADDR + if span.isRPCClient() { + label = z.SERVER_ADDR + } + anno := &z.BinaryAnnotation{ + Key: label, + Value: []byte{1}, + AnnotationType: z.AnnotationType_BOOL, + Host: &peer} + annotations = append(annotations, anno) + } + if !span.isRPC() { + componentName := endpoint.ServiceName + for _, tag := range span.tags { + if tag.key == string(ext.Component) { + componentName = stringify(tag.value) + break + } + } + local := &z.BinaryAnnotation{ + Key: z.LOCAL_COMPONENT, + Value: []byte(componentName), + AnnotationType: z.AnnotationType_STRING, + Host: endpoint} + annotations = append(annotations, local) + } + for _, tag := range span.tags { + // "Special tags" are already handled by this point, we'd be double reporting the + // tags if we don't skip here + if _, ok := specialTagHandlers[tag.key]; ok { + continue + } + if anno := buildBinaryAnnotation(tag.key, tag.value, 
span.tracer.options.maxTagValueLength, nil); anno != nil { + annotations = append(annotations, anno) + } + } + return annotations +} + +func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation { + bann := &z.BinaryAnnotation{Key: key, Host: endpoint} + if value, ok := val.(string); ok { + bann.Value = []byte(truncateString(value, maxTagValueLength)) + bann.AnnotationType = z.AnnotationType_STRING + } else if value, ok := val.([]byte); ok { + if len(value) > maxTagValueLength { + value = value[:maxTagValueLength] + } + bann.Value = value + bann.AnnotationType = z.AnnotationType_BYTES + } else if value, ok := val.(int32); ok && allowPackedNumbers { + bann.Value = int32ToBytes(value) + bann.AnnotationType = z.AnnotationType_I32 + } else if value, ok := val.(int64); ok && allowPackedNumbers { + bann.Value = int64ToBytes(value) + bann.AnnotationType = z.AnnotationType_I64 + } else if value, ok := val.(int); ok && allowPackedNumbers { + bann.Value = int64ToBytes(int64(value)) + bann.AnnotationType = z.AnnotationType_I64 + } else if value, ok := val.(bool); ok { + bann.Value = []byte{boolToByte(value)} + bann.AnnotationType = z.AnnotationType_BOOL + } else { + value := stringify(val) + bann.Value = []byte(truncateString(value, maxTagValueLength)) + bann.AnnotationType = z.AnnotationType_STRING + } + return bann +} + +func stringify(value interface{}) string { + if s, ok := value.(string); ok { + return s + } + return fmt.Sprintf("%+v", value) +} + +func truncateString(value string, maxLength int) string { + // we ignore the problem of utf8 runes possibly being sliced in the middle, + // as it is rather expensive to iterate through each tag just to find rune + // boundaries. + if len(value) > maxLength { + return value[:maxLength] + } + return value +} + +func boolToByte(b bool) byte { + if b { + return 1 + } + return 0 +} + +// int32ToBytes converts int32 to bytes. +func int32ToBytes(i int32) []byte { + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(i)) + return buf +} + +// int64ToBytes converts int64 to bytes. +func int64ToBytes(i int64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, uint64(i)) + return buf +} + +type zipkinSpan struct { + *Span + + // peer points to the peer service participating in this span, + // e.g. the Client if this span is a server span, + // or Server if this span is a client span + peer struct { + Ipv4 int32 + Port int16 + ServiceName string + } + + // used to distinguish local vs. RPC Server vs. RPC Client spans + spanKind string +} + +func (s *zipkinSpan) handleSpecialTags() { + s.Lock() + defer s.Unlock() + if s.firstInProcess { + // append the process tags + s.tags = append(s.tags, s.tracer.tags...) 
+ } + filteredTags := make([]Tag, 0, len(s.tags)) + for _, tag := range s.tags { + if handler, ok := specialTagHandlers[tag.key]; ok { + handler(s, tag.value) + } else { + filteredTags = append(filteredTags, tag) + } + } + s.tags = filteredTags +} + +func setSpanKind(s *zipkinSpan, value interface{}) { + if val, ok := value.(string); ok { + s.spanKind = val + return + } + if val, ok := value.(ext.SpanKindEnum); ok { + s.spanKind = string(val) + } +} + +func setPeerIPv4(s *zipkinSpan, value interface{}) { + if val, ok := value.(string); ok { + if ip, err := utils.ParseIPToUint32(val); err == nil { + s.peer.Ipv4 = int32(ip) + return + } + } + if val, ok := value.(uint32); ok { + s.peer.Ipv4 = int32(val) + return + } + if val, ok := value.(int32); ok { + s.peer.Ipv4 = val + } +} + +func setPeerPort(s *zipkinSpan, value interface{}) { + if val, ok := value.(string); ok { + if port, err := utils.ParsePort(val); err == nil { + s.peer.Port = int16(port) + return + } + } + if val, ok := value.(uint16); ok { + s.peer.Port = int16(val) + return + } + if val, ok := value.(int); ok { + s.peer.Port = int16(val) + } +} + +func setPeerService(s *zipkinSpan, value interface{}) { + if val, ok := value.(string); ok { + s.peer.ServiceName = val + } +} + +func removeTag(s *zipkinSpan, value interface{}) {} + +func (s *zipkinSpan) peerDefined() bool { + return s.peer.ServiceName != "" || s.peer.Ipv4 != 0 || s.peer.Port != 0 +} + +func (s *zipkinSpan) isRPC() bool { + s.RLock() + defer s.RUnlock() + return s.spanKind == string(ext.SpanKindRPCClientEnum) || s.spanKind == string(ext.SpanKindRPCServerEnum) +} + +func (s *zipkinSpan) isRPCClient() bool { + s.RLock() + defer s.RUnlock() + return s.spanKind == string(ext.SpanKindRPCClientEnum) +} diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/github.com/uber/jaeger-lib/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go new file mode 100644 index 000000000000..2a6a43efdb45 --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/metrics/counter.go @@ -0,0 +1,28 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metrics + +// Counter tracks the number of times an event has occurred +type Counter interface { + // Inc adds the given value to the counter. + Inc(int64) +} + +// NullCounter counter that does nothing +var NullCounter Counter = nullCounter{} + +type nullCounter struct{} + +func (nullCounter) Inc(int64) {} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go new file mode 100644 index 000000000000..a744a890df3d --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/metrics/factory.go @@ -0,0 +1,35 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +// Factory creates new metrics +type Factory interface { + Counter(name string, tags map[string]string) Counter + Timer(name string, tags map[string]string) Timer + Gauge(name string, tags map[string]string) Gauge + + // Namespace returns a nested metrics factory. + Namespace(name string, tags map[string]string) Factory +} + +// NullFactory is a metrics factory that returns NullCounter, NullTimer, and NullGauge. +var NullFactory Factory = nullFactory{} + +type nullFactory struct{} + +func (nullFactory) Counter(name string, tags map[string]string) Counter { return NullCounter } +func (nullFactory) Timer(name string, tags map[string]string) Timer { return NullTimer } +func (nullFactory) Gauge(name string, tags map[string]string) Gauge { return NullGauge } +func (nullFactory) Namespace(name string, tags map[string]string) Factory { return NullFactory } diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go new file mode 100644 index 000000000000..3c606391a095 --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go @@ -0,0 +1,28 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +// Gauge returns instantaneous measurements of something as an int64 value +type Gauge interface { + // Update the gauge to the value passed in. 
+ Update(int64) +} + +// NullGauge gauge that does nothing +var NullGauge Gauge = nullGauge{} + +type nullGauge struct{} + +func (nullGauge) Update(int64) {} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/local.go b/vendor/github.com/uber/jaeger-lib/metrics/local.go new file mode 100644 index 000000000000..217d30600a0b --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/metrics/local.go @@ -0,0 +1,337 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/codahale/hdrhistogram" +) + +// This is intentionally very similar to github.com/codahale/metrics, the +// main difference being that counters/gauges are scoped to the provider +// rather than being global (to facilitate testing). + +// A LocalBackend is a metrics provider which aggregates data in-vm, and +// allows exporting snapshots to shove the data into a remote collector +type LocalBackend struct { + cm sync.Mutex + gm sync.Mutex + tm sync.Mutex + counters map[string]*int64 + gauges map[string]*int64 + timers map[string]*localBackendTimer + stop chan struct{} + wg sync.WaitGroup + TagsSep string + TagKVSep string +} + +// NewLocalBackend returns a new LocalBackend. The collectionInterval is the histogram +// time window for each timer. 
+func NewLocalBackend(collectionInterval time.Duration) *LocalBackend { + b := &LocalBackend{ + counters: make(map[string]*int64), + gauges: make(map[string]*int64), + timers: make(map[string]*localBackendTimer), + stop: make(chan struct{}), + TagsSep: "|", + TagKVSep: "=", + } + if collectionInterval == 0 { + // Use one histogram time window for all timers + return b + } + b.wg.Add(1) + go b.runLoop(collectionInterval) + return b +} + +// Clear discards accumulated stats +func (b *LocalBackend) Clear() { + b.cm.Lock() + defer b.cm.Unlock() + b.gm.Lock() + defer b.gm.Unlock() + b.tm.Lock() + defer b.tm.Unlock() + b.counters = make(map[string]*int64) + b.gauges = make(map[string]*int64) + b.timers = make(map[string]*localBackendTimer) +} + +func (b *LocalBackend) runLoop(collectionInterval time.Duration) { + defer b.wg.Done() + ticker := time.NewTicker(collectionInterval) + for { + select { + case <-ticker.C: + b.tm.Lock() + timers := make(map[string]*localBackendTimer, len(b.timers)) + for timerName, timer := range b.timers { + timers[timerName] = timer + } + b.tm.Unlock() + + for _, t := range timers { + t.Lock() + t.hist.Rotate() + t.Unlock() + } + case <-b.stop: + ticker.Stop() + return + } + } +} + +// IncCounter increments a counter value +func (b *LocalBackend) IncCounter(name string, tags map[string]string, delta int64) { + name = GetKey(name, tags, b.TagsSep, b.TagKVSep) + b.cm.Lock() + defer b.cm.Unlock() + counter := b.counters[name] + if counter == nil { + b.counters[name] = new(int64) + *b.counters[name] = delta + return + } + atomic.AddInt64(counter, delta) +} + +// UpdateGauge updates the value of a gauge +func (b *LocalBackend) UpdateGauge(name string, tags map[string]string, value int64) { + name = GetKey(name, tags, b.TagsSep, b.TagKVSep) + b.gm.Lock() + defer b.gm.Unlock() + gauge := b.gauges[name] + if gauge == nil { + b.gauges[name] = new(int64) + *b.gauges[name] = value + return + } + atomic.StoreInt64(gauge, value) +} + +// RecordTimer records a timing duration +func (b *LocalBackend) RecordTimer(name string, tags map[string]string, d time.Duration) { + name = GetKey(name, tags, b.TagsSep, b.TagKVSep) + timer := b.findOrCreateTimer(name) + timer.Lock() + timer.hist.Current.RecordValue(int64(d / time.Millisecond)) + timer.Unlock() +} + +func (b *LocalBackend) findOrCreateTimer(name string) *localBackendTimer { + b.tm.Lock() + defer b.tm.Unlock() + if t, ok := b.timers[name]; ok { + return t + } + + t := &localBackendTimer{ + hist: hdrhistogram.NewWindowed(5, 0, int64((5*time.Minute)/time.Millisecond), 1), + } + b.timers[name] = t + return t +} + +type localBackendTimer struct { + sync.Mutex + hist *hdrhistogram.WindowedHistogram +} + +var ( + percentiles = map[string]float64{ + "P50": 50, + "P75": 75, + "P90": 90, + "P95": 95, + "P99": 99, + "P999": 99.9, + } +) + +// Snapshot captures a snapshot of the current counter and gauge values +func (b *LocalBackend) Snapshot() (counters, gauges map[string]int64) { + b.cm.Lock() + defer b.cm.Unlock() + + counters = make(map[string]int64, len(b.counters)) + for name, value := range b.counters { + counters[name] = atomic.LoadInt64(value) + } + + b.gm.Lock() + defer b.gm.Unlock() + + gauges = make(map[string]int64, len(b.gauges)) + for name, value := range b.gauges { + gauges[name] = atomic.LoadInt64(value) + } + + b.tm.Lock() + timers := make(map[string]*localBackendTimer) + for timerName, timer := range b.timers { + timers[timerName] = timer + } + b.tm.Unlock() + + for timerName, timer := range timers { + timer.Lock() + hist := 
timer.hist.Merge() + timer.Unlock() + for name, q := range percentiles { + gauges[timerName+"."+name] = hist.ValueAtQuantile(q) + } + } + + return +} + +// Stop cleanly closes the background goroutine spawned by NewLocalBackend. +func (b *LocalBackend) Stop() { + close(b.stop) + b.wg.Wait() +} + +// GetKey converts name+tags into a single string of the form +// "name|tag1=value1|...|tagN=valueN", where tag names are +// sorted alphabetically. +func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string { + keys := make([]string, 0, len(tags)) + for k := range tags { + keys = append(keys, k) + } + sort.Strings(keys) + key := name + for _, k := range keys { + key = key + tagsSep + k + tagKVSep + tags[k] + } + return key +} + +type stats struct { + name string + tags map[string]string + localBackend *LocalBackend +} + +type localTimer struct { + stats +} + +func (l *localTimer) Record(d time.Duration) { + l.localBackend.RecordTimer(l.name, l.tags, d) +} + +type localCounter struct { + stats +} + +func (l *localCounter) Inc(delta int64) { + l.localBackend.IncCounter(l.name, l.tags, delta) +} + +type localGauge struct { + stats +} + +func (l *localGauge) Update(value int64) { + l.localBackend.UpdateGauge(l.name, l.tags, value) +} + +// LocalFactory stats factory that creates metrics that are stored locally +type LocalFactory struct { + *LocalBackend + namespace string + tags map[string]string +} + +// NewLocalFactory returns a new LocalMetricsFactory +func NewLocalFactory(collectionInterval time.Duration) *LocalFactory { + return &LocalFactory{ + LocalBackend: NewLocalBackend(collectionInterval), + } +} + +// appendTags adds the tags to the namespace tags and returns a combined map. +func (l *LocalFactory) appendTags(tags map[string]string) map[string]string { + newTags := make(map[string]string) + for k, v := range l.tags { + newTags[k] = v + } + for k, v := range tags { + newTags[k] = v + } + return newTags +} + +func (l *LocalFactory) newNamespace(name string) string { + if l.namespace == "" { + return name + } + + if name == "" { + return l.namespace + } + + return l.namespace + "." + name +} + +// Counter returns a local stats counter +func (l *LocalFactory) Counter(name string, tags map[string]string) Counter { + return &localCounter{ + stats{ + name: l.newNamespace(name), + tags: l.appendTags(tags), + localBackend: l.LocalBackend, + }, + } +} + +// Timer returns a local stats timer. +func (l *LocalFactory) Timer(name string, tags map[string]string) Timer { + return &localTimer{ + stats{ + name: l.newNamespace(name), + tags: l.appendTags(tags), + localBackend: l.LocalBackend, + }, + } +} + +// Gauge returns a local stats gauge. +func (l *LocalFactory) Gauge(name string, tags map[string]string) Gauge { + return &localGauge{ + stats{ + name: l.newNamespace(name), + tags: l.appendTags(tags), + localBackend: l.LocalBackend, + }, + } +} + +// Namespace returns a new namespace. +func (l *LocalFactory) Namespace(name string, tags map[string]string) Factory { + return &LocalFactory{ + namespace: l.newNamespace(name), + tags: l.appendTags(tags), + LocalBackend: l.LocalBackend, + } +} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go new file mode 100644 index 000000000000..0b97707b075e --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go @@ -0,0 +1,85 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
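
The vendored `LocalFactory`/`LocalBackend` above aggregate counters, gauges and timers in-process and expose them via `Snapshot()`, with timer percentiles folded into the gauges map. A minimal usage sketch, assuming only the jaeger-lib API shown in this diff (the "whisper" namespace, tag values and metric names are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// LocalFactory implements metrics.Factory and keeps all values in memory.
	f := metrics.NewLocalFactory(10 * time.Second)
	defer f.Stop() // stops the histogram-rotation goroutine

	// Scope subsequent metrics under an illustrative namespace and tag set.
	scoped := f.Namespace("whisper", map[string]string{"component": "p2p"})

	c := scoped.Counter("messages", map[string]string{"direction": "in"})
	c.Inc(3)

	t := scoped.Timer("handle", nil)
	t.Record(42 * time.Millisecond)

	// Snapshot returns the aggregated counters and gauges; timer percentiles
	// (P50, P75, ...) appear in the gauges map under suffixed keys.
	counters, gauges := f.Snapshot()
	fmt.Println(counters, gauges)
}
```
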
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "fmt" + "reflect" + "strings" +) + +// Init initializes the passed in metrics and initializes its fields using the passed in factory. +func Init(metrics interface{}, factory Factory, globalTags map[string]string) { + if err := initMetrics(metrics, factory, globalTags); err != nil { + panic(err.Error()) + } +} + +// initMetrics uses reflection to initialize a struct containing metrics fields +// by assigning new Counter/Gauge/Timer values with the metric name retrieved +// from the `metric` tag and stats tags retrieved from the `tags` tag. +// +// Note: all fields of the struct must be exported, have a `metric` tag, and be +// of type Counter or Gauge or Timer. +func initMetrics(m interface{}, factory Factory, globalTags map[string]string) error { + // Allow user to opt out of reporting metrics by passing in nil. + if factory == nil { + factory = NullFactory + } + + counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem() + gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem() + timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem() + + v := reflect.ValueOf(m).Elem() + t := v.Type() + for i := 0; i < t.NumField(); i++ { + tags := make(map[string]string) + for k, v := range globalTags { + tags[k] = v + } + field := t.Field(i) + metric := field.Tag.Get("metric") + if metric == "" { + return fmt.Errorf("Field %s is missing a tag 'metric'", field.Name) + } + if tagString := field.Tag.Get("tags"); tagString != "" { + tagPairs := strings.Split(tagString, ",") + for _, tagPair := range tagPairs { + tag := strings.Split(tagPair, "=") + if len(tag) != 2 { + return fmt.Errorf( + "Field [%s]: Tag [%s] is not of the form key=value in 'tags' string [%s]", + field.Name, tagPair, tagString) + } + tags[tag[0]] = tag[1] + } + } + var obj interface{} + if field.Type.AssignableTo(counterPtrType) { + obj = factory.Counter(metric, tags) + } else if field.Type.AssignableTo(gaugePtrType) { + obj = factory.Gauge(metric, tags) + } else if field.Type.AssignableTo(timerPtrType) { + obj = factory.Timer(metric, tags) + } else { + return fmt.Errorf( + "Field %s is not a pointer to timer, gauge, or counter", + field.Name) + } + v.Field(i).Set(reflect.ValueOf(obj)) + } + return nil +} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go new file mode 100644 index 000000000000..4a8abdb539f9 --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go @@ -0,0 +1,43 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
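
The reflection-based `Init`/`initMetrics` above populate a struct of metric fields from `metric` and `tags` struct tags. A hedged sketch of how a caller might declare and initialize such a struct; the type, field and tag names here are hypothetical, not taken from this diff:

```go
package main

import (
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

// envelopeMetrics is a hypothetical metrics bundle: every exported field must
// carry a `metric` tag and be of type Counter, Gauge or Timer, which is what
// initMetrics enforces via reflection.
type envelopeMetrics struct {
	Received metrics.Counter `metric:"envelopes" tags:"state=received"`
	Dropped  metrics.Counter `metric:"envelopes" tags:"state=dropped"`
	PoolSize metrics.Gauge   `metric:"pool_size"`
	Handle   metrics.Timer   `metric:"handle_time"`
}

func newEnvelopeMetrics(factory metrics.Factory) *envelopeMetrics {
	m := &envelopeMetrics{}
	// Init panics on malformed tags; a nil factory is replaced by NullFactory,
	// so every metric call becomes a no-op.
	metrics.Init(m, factory, map[string]string{"protocol": "shh"})
	return m
}

func main() {
	m := newEnvelopeMetrics(nil) // nil factory -> Null* implementations
	m.Received.Inc(1)
	m.Handle.Record(5 * time.Millisecond)
}
```
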
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "time" +) + +// StartStopwatch begins recording the executing time of an event, returning +// a Stopwatch that should be used to stop the recording the time for +// that event. Multiple events can be occurring simultaneously each +// represented by different active Stopwatches +func StartStopwatch(timer Timer) Stopwatch { + return Stopwatch{t: timer, start: time.Now()} +} + +// A Stopwatch tracks the execution time of a specific event +type Stopwatch struct { + t Timer + start time.Time +} + +// Stop stops executing of the stopwatch and records the amount of elapsed time +func (s Stopwatch) Stop() { + s.t.Record(s.ElapsedTime()) +} + +// ElapsedTime returns the amount of elapsed time (in time.Duration) +func (s Stopwatch) ElapsedTime() time.Duration { + return time.Since(s.start) +} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go new file mode 100644 index 000000000000..e18d222abb4a --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/metrics/timer.go @@ -0,0 +1,33 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "time" +) + +// Timer accumulates observations about how long some operation took, +// and also maintains a historgam of percentiles. +type Timer interface { + // Records the time passed in. + Record(time.Duration) +} + +// NullTimer timer that does nothing +var NullTimer Timer = nullTimer{} + +type nullTimer struct{} + +func (nullTimer) Record(time.Duration) {} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 000000000000..3d88f866739e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,38 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. 
+var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 000000000000..d93036f7522b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 000000000000..1d2ab2902a76 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 000000000000..f7cb46971cb0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build !gccgo + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c new file mode 100644 index 000000000000..e363c7d13197 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build gccgo + +#include +#include + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. 
+// +// TODO: Replace with a better alternative: +// +// #include +// +// #pragma GCC target("xsave") +// +// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { +// unsigned long long x = _xgetbv(0); +// *eax = x & 0xffffffff; +// *edx = (x >> 32) & 0xffffffff; +// } +// +// Note that _xgetbv is defined starting with GCC 8. +void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + __asm(" xorl %%ecx, %%ecx\n" + " xgetbv" + : "=a"(*eax), "=d"(*edx)); +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go new file mode 100644 index 000000000000..ba49b91bd398 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 000000000000..6165f121249a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 000000000000..1269eee88d00 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 000000000000..d10759a524f2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 000000000000..684c4f005d09 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 000000000000..71e288b06223 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build 386 amd64 amd64p32 + +package cpu + +const cacheLineSize = 64 + +func init() { + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + + osSupportsAVX := false + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, _, _ := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasADX = isSet(19, ebx7) +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<= f.PoW } -func matchSingleTopic(topic TopicType, bt []byte) bool { - if len(bt) > TopicLength { - bt = bt[:TopicLength] - } - - if len(bt) < TopicLength { - return false - } - - for j, b := range bt { - if topic[j] != b { - return false - } - } - return true -} - // IsPubKeyEqual checks that two public keys are equal func IsPubKeyEqual(a, b *ecdsa.PublicKey) bool { if !ValidatePublicKey(a) { diff --git a/whisper/whisperv6/filter_test.go b/whisper/whisperv6/filter_test.go index 0bb7986c390b..82e4aa024148 100644 --- a/whisper/whisperv6/filter_test.go +++ b/whisper/whisperv6/filter_test.go @@ -829,39 +829,3 @@ func TestVariableTopics(t *testing.T) { } } } - -func TestMatchSingleTopic_ReturnTrue(t *testing.T) { - bt := []byte("test") - topic := BytesToTopic(bt) - - if !matchSingleTopic(topic, bt) { - t.FailNow() - } -} - -func TestMatchSingleTopic_WithTail_ReturnTrue(t *testing.T) { - bt := []byte("test with tail") - topic := BytesToTopic([]byte("test")) - - if !matchSingleTopic(topic, bt) { - t.FailNow() - } -} - -func TestMatchSingleTopic_NotEquals_ReturnFalse(t *testing.T) { - bt := []byte("tes") - topic := BytesToTopic(bt) - - if matchSingleTopic(topic, bt) { - t.FailNow() - } -} - -func TestMatchSingleTopic_InsufficientLength_ReturnFalse(t *testing.T) { - bt := []byte("test") - topic := BytesToTopic([]byte("not_equal")) - - if matchSingleTopic(topic, bt) { - t.FailNow() - } -} diff --git a/whisper/whisperv6/peer.go b/whisper/whisperv6/peer.go index 2bf1c905b232..79cc21270826 100644 --- a/whisper/whisperv6/peer.go +++ b/whisper/whisperv6/peer.go @@ -22,11 +22,11 @@ import ( "sync" "time" + mapset "github.com/deckarep/golang-set" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/rlp" - set "gopkg.in/fatih/set.v0" ) // Peer represents a whisper protocol peer connection. 
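
Stepping back to the `golang.org/x/sys/cpu` package vendored above: its `init` probes CPUID (and XGETBV for OS AVX support) and publishes the results as booleans on `cpu.X86`, which are all false on non-x86 platforms. A minimal sketch of how calling code can gate an optimized path on those flags; the helper name and the AES-GCM framing are illustrative, not taken from this diff:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// hasAESGCMSupport is an illustrative helper: it mirrors the kind of check a
// crypto package can make before selecting an AES-NI/CLMUL fast path.
func hasAESGCMSupport() bool {
	// These flags are filled in by the package's init() via CPUID.
	return cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ && cpu.X86.HasSSE41
}

func main() {
	if hasAESGCMSupport() {
		fmt.Println("using hardware-accelerated AES-GCM path")
	} else {
		fmt.Println("falling back to the generic implementation")
	}
}
```
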
@@ -41,7 +41,7 @@ type Peer struct { bloomFilter []byte fullNode bool - known *set.Set // Messages already known by the peer to avoid wasting bandwidth + known mapset.Set // Messages already known by the peer to avoid wasting bandwidth quit chan struct{} } @@ -54,7 +54,7 @@ func newPeer(host *Whisper, remote *p2p.Peer, rw p2p.MsgReadWriter) *Peer { ws: rw, trusted: false, powRequirement: 0.0, - known: set.New(), + known: mapset.NewSet(), quit: make(chan struct{}), bloomFilter: MakeFullNodeBloom(), fullNode: true, @@ -165,7 +165,7 @@ func (peer *Peer) mark(envelope *Envelope) { // marked checks if an envelope is already known to the remote peer. func (peer *Peer) marked(envelope *Envelope) bool { - return peer.known.Has(envelope.Hash()) + return peer.known.Contains(envelope.Hash()) } // expire iterates over all the known envelopes in the host and removes all diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go index 3418c7bf7d36..76d485808dfd 100644 --- a/whisper/whisperv6/whisper.go +++ b/whisper/whisperv6/whisper.go @@ -26,6 +26,7 @@ import ( "sync" "time" + mapset "github.com/deckarep/golang-set" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" @@ -35,7 +36,6 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "golang.org/x/crypto/pbkdf2" "golang.org/x/sync/syncmap" - set "gopkg.in/fatih/set.v0" ) // Statistics holds several message-related counter for analytics @@ -69,7 +69,7 @@ type Whisper struct { poolMu sync.RWMutex // Mutex to sync the message and expiration pools envelopes map[common.Hash]*Envelope // Pool of envelopes currently tracked by this node - expirations map[uint32]*set.SetNonTS // Message expiration pool + expirations map[uint32]mapset.Set // Message expiration pool peerMu sync.RWMutex // Mutex to sync the active peer set peers map[*Peer]struct{} // Set of currently active peers @@ -100,7 +100,7 @@ func New(cfg *Config) *Whisper { privateKeys: make(map[string]*ecdsa.PrivateKey), symKeys: make(map[string][]byte), envelopes: make(map[common.Hash]*Envelope), - expirations: make(map[uint32]*set.SetNonTS), + expirations: make(map[uint32]mapset.Set), peers: make(map[*Peer]struct{}), messageQueue: make(chan *Envelope, messageQueueLimit), p2pMsgQueue: make(chan *Envelope, messageQueueLimit), @@ -796,9 +796,9 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { if !alreadyCached { whisper.envelopes[hash] = envelope if whisper.expirations[envelope.Expiry] == nil { - whisper.expirations[envelope.Expiry] = set.NewNonTS() + whisper.expirations[envelope.Expiry] = mapset.NewThreadUnsafeSet() } - if !whisper.expirations[envelope.Expiry].Has(hash) { + if !whisper.expirations[envelope.Expiry].Contains(hash) { whisper.expirations[envelope.Expiry].Add(hash) } }
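
The whisper changes above swap `gopkg.in/fatih/set.v0` for `github.com/deckarep/golang-set`: `set.New()`/`Has` become `mapset.NewSet()`/`Contains`, and the non-thread-safe `NewNonTS()` becomes `NewThreadUnsafeSet()`. A small sketch of the replacement API as it is used here; the hash strings are placeholders:

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	// Thread-safe set, as used for Peer.known.
	known := mapset.NewSet()
	known.Add("0xaaaa") // placeholder for an envelope hash

	fmt.Println(known.Contains("0xaaaa")) // true; Contains replaces set.Has

	// Thread-unsafe variant, as used for the per-expiry pools, where the
	// surrounding poolMu mutex already serializes access.
	expiry := mapset.NewThreadUnsafeSet()
	if !expiry.Contains("0xbbbb") {
		expiry.Add("0xbbbb")
	}
	fmt.Println(expiry.Cardinality()) // 1
}
```
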